Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 19 additions & 4 deletions api/utils/keys/piv/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,9 @@ var yubiKeyServiceMu sync.Mutex
// YubiKeyService is a YubiKey PIV implementation of [hardwarekey.Service].
type YubiKeyService struct {
prompt hardwarekey.Prompt
// TODO(Joerger): Remove prompt mutex once there is no longer a shared global service
// that needs its protection.
promptMu sync.Mutex

// signMu prevents prompting for PIN/touch repeatedly for concurrent signatures.
// TODO(Joerger): Rather than preventing concurrent signatures, we can make the
Expand All @@ -67,7 +70,7 @@ func NewYubiKeyService(customPrompt hardwarekey.Prompt) *YubiKeyService {
if yubiKeyService != nil {
// If a prompt is provided, prioritize it over the existing prompt value.
if customPrompt != nil {
yubiKeyService.prompt = customPrompt
yubiKeyService.setPrompt(customPrompt)
}
return yubiKeyService
}
Expand Down Expand Up @@ -116,7 +119,7 @@ func (s *YubiKeyService) NewPrivateKey(ctx context.Context, config hardwarekey.P

// If PIN is required, check that PIN and PUK are not the defaults.
if config.Policy.PINRequired {
if err := y.checkOrSetPIN(ctx, s.prompt, config.ContextualKeyInfo, config.PINCacheTTL); err != nil {
if err := y.checkOrSetPIN(ctx, s.getPrompt(), config.ContextualKeyInfo, config.PINCacheTTL); err != nil {
return nil, trace.Wrap(err)
}
}
Expand Down Expand Up @@ -188,7 +191,7 @@ func (s *YubiKeyService) Sign(ctx context.Context, ref *hardwarekey.PrivateKeyRe
s.signMu.Lock()
defer s.signMu.Unlock()

return y.sign(ctx, ref, keyInfo, s.prompt, rand, digest, opts)
return y.sign(ctx, ref, keyInfo, s.getPrompt(), rand, digest, opts)
}

// TODO(Joerger): Re-attesting the key every time we decode a hardware key signer is very resource
Expand Down Expand Up @@ -260,10 +263,22 @@ func (s *YubiKeyService) getYubiKey(serialNumber uint32) (*YubiKey, error) {

func (s *YubiKeyService) promptOverwriteSlot(ctx context.Context, msg string, keyInfo hardwarekey.ContextualKeyInfo) error {
promptQuestion := fmt.Sprintf("%v\nWould you like to overwrite this slot's private key and certificate?", msg)
if confirmed, confirmErr := s.prompt.ConfirmSlotOverwrite(ctx, promptQuestion, keyInfo); confirmErr != nil {
if confirmed, confirmErr := s.getPrompt().ConfirmSlotOverwrite(ctx, promptQuestion, keyInfo); confirmErr != nil {
return trace.Wrap(confirmErr)
} else if !confirmed {
return trace.Wrap(trace.CompareFailed(msg), "user declined to overwrite slot")
}
return nil
}

// setPrompt replaces the service's hardware key prompt. The swap is guarded by
// promptMu because the global YubiKey service may be shared across callers.
func (s *YubiKeyService) setPrompt(prompt hardwarekey.Prompt) {
	s.promptMu.Lock()
	s.prompt = prompt
	s.promptMu.Unlock()
}

// getPrompt returns the currently configured hardware key prompt, reading it
// under promptMu so a concurrent setPrompt cannot race with the access.
func (s *YubiKeyService) getPrompt() hardwarekey.Prompt {
	s.promptMu.Lock()
	p := s.prompt
	s.promptMu.Unlock()
	return p
}
25 changes: 16 additions & 9 deletions integration/proxy/teleterm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -242,6 +242,7 @@ func testGatewayCertRenewal(ctx context.Context, t *testing.T, params gatewayCer
fakeClock := clockwork.NewFakeClockAt(time.Now())
storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
// Inject a fake clock into clusters.Storage so we can control when the middleware thinks the
// db cert has expired.
Expand All @@ -250,12 +251,14 @@ func testGatewayCertRenewal(ctx context.Context, t *testing.T, params gatewayCer
})
require.NoError(t, err)

tshdEventsClient := daemon.NewTshdEventsClient(func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
})

daemonService, err := daemon.New(daemon.Config{
Clock: fakeClock,
Storage: storage,
CreateTshdEventsClientCredsFunc: func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
},
Clock: fakeClock,
Storage: storage,
TshdEventsClient: tshdEventsClient,
CreateClientCacheFunc: func(newClient clientcache.NewClientFunc) (daemon.ClientCache, error) {
return clientcache.NewNoCache(newClient), nil
},
Expand Down Expand Up @@ -877,14 +880,18 @@ func testTeletermAppGatewayTargetPortValidation(t *testing.T, pack *appaccess.Pa

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
require.NoError(t, err)

tshdEventsClient := daemon.NewTshdEventsClient(func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
})

daemonService, err := daemon.New(daemon.Config{
Storage: storage,
CreateTshdEventsClientCredsFunc: func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
},
Storage: storage,
TshdEventsClient: tshdEventsClient,
CreateClientCacheFunc: func(newClient clientcache.NewClientFunc) (daemon.ClientCache, error) {
return clientcache.NewNoCache(newClient), nil
},
Expand Down
60 changes: 39 additions & 21 deletions integration/teleterm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,8 +254,10 @@ func TestTeleterm(t *testing.T) {
func testAddingRootCluster(t *testing.T, pack *dbhelpers.DatabasePack, creds *helpers.UserCreds) {
t.Helper()

homeDir := t.TempDir()
storage, err := clusters.NewStorage(clusters.Config{
Dir: t.TempDir(),
Dir: homeDir,
ClientStore: client.NewFSClientStore(homeDir),
InsecureSkipVerify: true,
})
require.NoError(t, err)
Expand Down Expand Up @@ -288,6 +290,7 @@ func testListRootClustersReturnsLoggedInUser(t *testing.T, pack *dbhelpers.Datab

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
require.NoError(t, err)
Expand Down Expand Up @@ -370,6 +373,7 @@ func testGetClusterReturnsPropertiesFromAuthServer(t *testing.T, pack *dbhelpers

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
require.NoError(t, err)
Expand Down Expand Up @@ -422,20 +426,23 @@ func testHeadlessWatcher(t *testing.T, pack *dbhelpers.DatabasePack, creds *help

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
require.NoError(t, err)

cluster, _, err := storage.Add(ctx, tc.WebProxyAddr)
require.NoError(t, err)

tshdEventsClient := daemon.NewTshdEventsClient(func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
})

daemonService, err := daemon.New(daemon.Config{
Storage: storage,
CreateTshdEventsClientCredsFunc: func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
},
KubeconfigsDir: t.TempDir(),
AgentsDir: t.TempDir(),
Storage: storage,
TshdEventsClient: tshdEventsClient,
KubeconfigsDir: t.TempDir(),
AgentsDir: t.TempDir(),
})
require.NoError(t, err)
t.Cleanup(func() {
Expand Down Expand Up @@ -489,6 +496,7 @@ func testClientCache(t *testing.T, pack *dbhelpers.DatabasePack, creds *helpers.

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
Clock: storageFakeClock,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
Expand All @@ -497,13 +505,15 @@ func testClientCache(t *testing.T, pack *dbhelpers.DatabasePack, creds *helpers.
cluster, _, err := storage.Add(ctx, tc.WebProxyAddr)
require.NoError(t, err)

tshdEventsClient := daemon.NewTshdEventsClient(func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
})

daemonService, err := daemon.New(daemon.Config{
Storage: storage,
CreateTshdEventsClientCredsFunc: func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
},
KubeconfigsDir: t.TempDir(),
AgentsDir: t.TempDir(),
Storage: storage,
TshdEventsClient: tshdEventsClient,
KubeconfigsDir: t.TempDir(),
AgentsDir: t.TempDir(),
})
require.NoError(t, err)
t.Cleanup(func() {
Expand Down Expand Up @@ -747,8 +757,10 @@ func testCreateConnectMyComputerRole(t *testing.T, pack *dbhelpers.DatabasePack)
require.NoError(t, authServer.UpsertPassword(userName, []byte(userPassword)))

// Prepare daemon.Service.
homeDir := t.TempDir()
storage, err := clusters.NewStorage(clusters.Config{
Dir: t.TempDir(),
Dir: homeDir,
ClientStore: client.NewFSClientStore(homeDir),
InsecureSkipVerify: true,
})
require.NoError(t, err)
Expand Down Expand Up @@ -863,20 +875,23 @@ func testCreateConnectMyComputerToken(t *testing.T, pack *dbhelpers.DatabasePack
// Prepare daemon.Service.
storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
Clock: fakeClock,
WebauthnLogin: webauthnLogin,
})
require.NoError(t, err)

tshdEventsClient := daemon.NewTshdEventsClient(func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
})

daemonService, err := daemon.New(daemon.Config{
Clock: fakeClock,
Storage: storage,
KubeconfigsDir: t.TempDir(),
AgentsDir: t.TempDir(),
CreateTshdEventsClientCredsFunc: func() (grpc.DialOption, error) {
return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
},
Clock: fakeClock,
Storage: storage,
KubeconfigsDir: t.TempDir(),
AgentsDir: t.TempDir(),
TshdEventsClient: tshdEventsClient,
})
require.NoError(t, err)
t.Cleanup(func() {
Expand Down Expand Up @@ -925,6 +940,7 @@ func testWaitForConnectMyComputerNodeJoin(t *testing.T, pack *dbhelpers.Database

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
require.NoError(t, err)
Expand Down Expand Up @@ -1009,6 +1025,7 @@ func testDeleteConnectMyComputerNode(t *testing.T, pack *dbhelpers.DatabasePack)

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
require.NoError(t, err)
Expand Down Expand Up @@ -1236,6 +1253,7 @@ func testListDatabaseUsers(t *testing.T, pack *dbhelpers.DatabasePack) {

storage, err := clusters.NewStorage(clusters.Config{
Dir: tc.KeysDir,
ClientStore: tc.ClientStore,
InsecureSkipVerify: tc.InsecureSkipVerify,
})
require.NoError(t, err)
Expand Down
7 changes: 1 addition & 6 deletions lib/client/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -499,11 +499,6 @@ type Config struct {
// SSOMFACeremonyConstructor is a custom SSO MFA ceremony constructor.
SSOMFACeremonyConstructor func(rd *sso.Redirector) mfa.SSOMFACeremony

// CustomHardwareKeyPrompt is a custom hardware key prompt to use when asking
// for a hardware key PIN, touch, etc.
// If empty, a default CLI prompt is used.
CustomHardwareKeyPrompt hardwarekey.Prompt

// DisableSSHResumption disables transparent SSH connection resumption.
DisableSSHResumption bool

Expand Down Expand Up @@ -1293,7 +1288,7 @@ func NewClient(c *Config) (tc *TeleportClient, err error) {
} else {
// TODO (Joerger): init hardware key service (and client store) earlier where it can
// be properly shared.
hardwareKeyService := libhwk.NewService(context.TODO(), tc.CustomHardwareKeyPrompt)
hardwareKeyService := libhwk.NewService(context.TODO(), nil /*prompt*/)
tc.ClientStore = NewFSClientStore(c.KeysDir, WithHardwareKeyService(hardwareKeyService))
if c.AddKeysToAgent == AddKeysToAgentOnly {
// Store client keys in memory, but still save trusted certs and profile to disk.
Expand Down
10 changes: 6 additions & 4 deletions lib/teleterm/clusters/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ import (
"github.com/jonboulle/clockwork"

"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/utils/keys/hardwarekey"
"github.com/gravitational/teleport/lib/client"
)

Expand All @@ -44,9 +43,8 @@ type Config struct {
WebauthnLogin client.WebauthnLoginFunc
// AddKeysToAgent is passed to [client.Config].
AddKeysToAgent string
// CustomHardwareKeyPrompt is a custom hardware key prompt to use when asking
// for a hardware key PIN, touch, etc.
CustomHardwareKeyPrompt hardwarekey.Prompt
// ClientStore stores client data.
ClientStore *client.Store
}

// CheckAndSetDefaults checks the configuration for its validity and sets default values if needed
Expand All @@ -55,6 +53,10 @@ func (c *Config) CheckAndSetDefaults() error {
return trace.BadParameter("missing working directory")
}

if c.ClientStore == nil {
return trace.BadParameter("missing client store")
}

if c.Clock == nil {
c.Clock = clockwork.NewRealClock()
}
Expand Down
17 changes: 6 additions & 11 deletions lib/teleterm/clusters/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,7 @@ func NewStorage(cfg Config) (*Storage, error) {

// ListProfileNames returns just the names of profiles in s.Dir.
func (s *Storage) ListProfileNames() ([]string, error) {
profileStore := client.NewFSProfileStore(s.Dir)
pfNames, err := profileStore.ListProfiles()
return pfNames, trace.Wrap(err)
return s.ClientStore.ListProfiles()
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we wrap the error here?

Copy link
Copy Markdown
Contributor Author

@Joerger Joerger Apr 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If the function already wraps the error, then there's no reason to wrap it unless we want to add a new message. See below trace.Wrap just returns the existing error.

	if traceErr, ok := err.(Error); ok {
		trace = traceErr
	} else {
		trace = newTrace(err)
	}

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

All this time I've thought that trace.Wrap is important so that we can capture the stack trace. 🤦 But this is of course done with the runtime package.

}

// ListRootClusters reads root clusters from profiles.
Expand Down Expand Up @@ -161,7 +159,7 @@ func (s *Storage) addCluster(ctx context.Context, dir, webProxyAddress string) (
profileName := parseName(webProxyAddress)
clusterURI := uri.NewClusterURI(profileName)

cfg := s.makeDefaultClientConfig(clusterURI)
cfg := s.makeClientConfig()
cfg.WebProxyAddr = webProxyAddress

clusterClient, err := client.NewClient(cfg)
Expand Down Expand Up @@ -211,10 +209,8 @@ func (s *Storage) fromProfile(profileName, leafClusterName string) (*Cluster, *c
clusterNameForKey := profileName
clusterURI := uri.NewClusterURI(profileName)

profileStore := client.NewFSProfileStore(s.Dir)

cfg := s.makeDefaultClientConfig(clusterURI)
if err := cfg.LoadProfile(profileStore, profileName); err != nil {
cfg := s.makeClientConfig()
if err := cfg.LoadProfile(s.ClientStore, profileName); err != nil {
return nil, nil, trace.Wrap(err)
}

Expand Down Expand Up @@ -275,15 +271,14 @@ func (s *Storage) loadProfileStatusAndClusterKey(clusterClient *client.TeleportC
return status, nil
}

func (s *Storage) makeDefaultClientConfig(rootClusterURI uri.ResourceURI) *client.Config {
func (s *Storage) makeClientConfig() *client.Config {
cfg := client.MakeDefaultConfig()

cfg.HomePath = s.Dir
cfg.KeysDir = s.Dir
cfg.InsecureSkipVerify = s.InsecureSkipVerify
cfg.AddKeysToAgent = s.AddKeysToAgent
cfg.WebauthnLogin = s.WebauthnLogin
cfg.CustomHardwareKeyPrompt = s.CustomHardwareKeyPrompt
cfg.ClientStore = s.ClientStore
cfg.DTAuthnRunCeremony = dtauthn.NewCeremony().Run
cfg.DTAutoEnroll = dtenroll.AutoEnroll
return cfg
Expand Down
11 changes: 7 additions & 4 deletions lib/teleterm/daemon/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,10 +63,13 @@ type Config struct {
AgentsDir string

GatewayCreator GatewayCreator
// CreateTshdEventsClientCredsFunc lazily creates creds for the tshd events server run by the
// Electron app. This is to ensure that the server public key is written to the disk under the
// expected location by the time we get around to creating the client.
CreateTshdEventsClientCredsFunc CreateTshdEventsClientCredsFunc

// TshdEventsClient holds a client to send events to the Electron App.
//
// The startup of the app is orchestrated so that the client is loaded before any other method on
// daemon.Service. This way all the other code in daemon.Service can assume that the tshd events
// client is available right from the beginning, without the need for nil checks.
TshdEventsClient *TshdEventsClient

ConnectMyComputerRoleSetup *connectmycomputer.RoleSetup
ConnectMyComputerTokenProvisioner *connectmycomputer.TokenProvisioner
Expand Down
Loading
Loading