diff --git a/client/android/client.go b/client/android/client.go index 37e17a36319..134501bfe82 100644 --- a/client/android/client.go +++ b/client/android/client.go @@ -394,6 +394,66 @@ func (c *Client) RemoveConnectionListener() { c.recorder.RemoveConnectionListener() } +// GetServerPushedConnectionMode returns the canonical name of the +// connection mode the management server most recently pushed via +// PeerConfig (independent of any local profile/env override). Returns +// an empty string when the engine has not connected yet or the server +// has not pushed a value -- the Android UI then knows to display +// just "Follow server" without the (currently: ...) suffix. +func (c *Client) GetServerPushedConnectionMode() string { + cm := c.connMgrSafe() + if cm == nil { + return "" + } + return cm.ServerPushedMode().String() +} + +// GetServerPushedRelayTimeoutSecs returns the relay timeout in seconds +// most recently pushed by the management server, or 0 when no value +// has been received. Used by the Android UI as a hint. +func (c *Client) GetServerPushedRelayTimeoutSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedRelayTimeoutSecs()) +} + +// GetServerPushedP2pTimeoutSecs returns the ICE-only timeout (seconds) +// most recently pushed by the management server. +func (c *Client) GetServerPushedP2pTimeoutSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedP2pTimeoutSecs()) +} + +// GetServerPushedP2pRetryMaxSecs returns the ICE-backoff cap (seconds) +// most recently pushed by the management server. +func (c *Client) GetServerPushedP2pRetryMaxSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedP2pRetryMaxSecs()) +} + +// connMgrSafe is a small helper that walks the Client -> ConnectClient +// -> Engine -> ConnMgr chain and returns nil at the first nil pointer. 
+// Each accessor that surfaces engine state to the Android UI uses it. +func (c *Client) connMgrSafe() *internal.ConnMgr { + cc := c.getConnectClient() + if cc == nil { + return nil + } + engine := cc.Engine() + if engine == nil { + return nil + } + return engine.ConnMgr() +} + func (c *Client) toggleRoute(command routeCommand) error { return command.toggleRoute() } diff --git a/client/android/preferences.go b/client/android/preferences.go index c3c8eb3fbc9..79ea843895f 100644 --- a/client/android/preferences.go +++ b/client/android/preferences.go @@ -307,6 +307,91 @@ func (p *Preferences) SetBlockInbound(block bool) { p.configInput.BlockInbound = &block } +// GetConnectionMode returns the locally configured connection-mode override +// (canonical lower-kebab-case: "relay-forced", "p2p", "p2p-lazy", +// "p2p-dynamic", "follow-server"), or empty string if no local override +// is configured -- the daemon will then follow the server-pushed value. +func (p *Preferences) GetConnectionMode() (string, error) { + if p.configInput.ConnectionMode != nil { + return *p.configInput.ConnectionMode, nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return "", err + } + return cfg.ConnectionMode, nil +} + +// SetConnectionMode stores a local override for the connection mode. +// Pass an empty string to clear the override (revert to following the +// server-pushed value). +func (p *Preferences) SetConnectionMode(mode string) { + m := mode + p.configInput.ConnectionMode = &m +} + +// GetRelayTimeoutSeconds returns the locally configured relay-worker +// inactivity timeout in seconds, or 0 if no override is set (follow +// server-pushed value, or built-in default if the server has none). 
+func (p *Preferences) GetRelayTimeoutSeconds() (int64, error) { + if p.configInput.RelayTimeoutSeconds != nil { + return int64(*p.configInput.RelayTimeoutSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.RelayTimeoutSeconds), nil +} + +// SetRelayTimeoutSeconds stores a local override for the relay timeout. +// Pass 0 to clear the override. +func (p *Preferences) SetRelayTimeoutSeconds(secs int64) { + v := uint32(secs) + p.configInput.RelayTimeoutSeconds = &v +} + +// GetP2pTimeoutSeconds returns the locally configured ICE-worker +// inactivity timeout in seconds (only effective in p2p-dynamic mode), +// or 0 if no override is set. +func (p *Preferences) GetP2pTimeoutSeconds() (int64, error) { + if p.configInput.P2pTimeoutSeconds != nil { + return int64(*p.configInput.P2pTimeoutSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.P2pTimeoutSeconds), nil +} + +// SetP2pTimeoutSeconds stores a local override for the p2p timeout. +// Pass 0 to clear the override. +func (p *Preferences) SetP2pTimeoutSeconds(secs int64) { + v := uint32(secs) + p.configInput.P2pTimeoutSeconds = &v +} + +// GetP2pRetryMaxSeconds returns the locally configured cap on the +// per-peer ICE-failure backoff schedule, or 0 if no override is set. +func (p *Preferences) GetP2pRetryMaxSeconds() (int64, error) { + if p.configInput.P2pRetryMaxSeconds != nil { + return int64(*p.configInput.P2pRetryMaxSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.P2pRetryMaxSeconds), nil +} + +// SetP2pRetryMaxSeconds stores a local override for the backoff cap. +// Pass 0 to clear the override. 
+func (p *Preferences) SetP2pRetryMaxSeconds(secs int64) { + v := uint32(secs) + p.configInput.P2pRetryMaxSeconds = &v +} + // Commit writes out the changes to the config file func (p *Preferences) Commit() error { _, err := profilemanager.UpdateOrCreateConfig(p.configInput) diff --git a/client/cmd/root.go b/client/cmd/root.go index 29d4328a1f7..a4e8e934976 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -39,6 +39,10 @@ const ( extraIFaceBlackListFlag = "extra-iface-blacklist" dnsRouteIntervalFlag = "dns-router-interval" enableLazyConnectionFlag = "enable-lazy-connection" + connectionModeFlag = "connection-mode" + relayTimeoutFlag = "relay-timeout" + p2pTimeoutFlag = "p2p-timeout" + p2pRetryMaxFlag = "p2p-retry-max" mtuFlag = "mtu" ) @@ -72,6 +76,10 @@ var ( anonymizeFlag bool dnsRouteInterval time.Duration lazyConnEnabled bool + connectionMode string + relayTimeoutSecs uint32 + p2pTimeoutSecs uint32 + p2pRetryMaxSecs uint32 mtu uint16 profilesDisabled bool updateSettingsDisabled bool @@ -192,6 +200,15 @@ func init() { upCmd.PersistentFlags().BoolVar(&rosenpassPermissive, rosenpassPermissiveFlag, false, "[Experimental] Enable Rosenpass in permissive mode to allow this peer to accept WireGuard connections without requiring Rosenpass functionality from peers that do not have Rosenpass enabled.") upCmd.PersistentFlags().BoolVar(&autoConnectDisabled, disableAutoConnectFlag, false, "Disables auto-connect feature. If enabled, then the client won't connect automatically when the service starts.") upCmd.PersistentFlags().BoolVar(&lazyConnEnabled, enableLazyConnectionFlag, false, "[Experimental] Enable the lazy connection feature. If enabled, the client will establish connections on-demand. Note: this setting may be overridden by management configuration.") + upCmd.PersistentFlags().StringVar(&connectionMode, connectionModeFlag, "", + "[Experimental] Peer connection mode: relay-forced, p2p, p2p-lazy, p2p-dynamic, or follow-server. 
"+ + "Overrides the server-pushed value when set. Use follow-server to clear a previously-set local override.") + upCmd.PersistentFlags().Uint32Var(&relayTimeoutSecs, relayTimeoutFlag, 0, + "[Experimental] Relay-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default).") + upCmd.PersistentFlags().Uint32Var(&p2pTimeoutSecs, p2pTimeoutFlag, 0, + "[Experimental] ICE-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default). Only effective in p2p-dynamic mode (Phase 2).") + upCmd.PersistentFlags().Uint32Var(&p2pRetryMaxSecs, p2pRetryMaxFlag, 0, + "[Experimental] Maximum ICE-failure-backoff interval in seconds. 0 = use server-pushed value (or built-in default 15 min). Effective in p2p-dynamic mode (Phase 3 of #5989).") } diff --git a/client/cmd/service.go b/client/cmd/service.go index 56d8a8726fa..f8e6e97fecd 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -57,6 +57,24 @@ func init() { installCmd.Flags().StringSliceVar(&serviceEnvVars, "service-env", nil, serviceEnvDesc) reconfigureCmd.Flags().StringSliceVar(&serviceEnvVars, "service-env", nil, serviceEnvDesc) + // Profile-level connection-mode + timeout flags. Same semantics as on + // `netbird up` but writeable at install time so server/headless + // installs can pre-seed the active profile before the daemon starts. + // Same package-level vars are shared with upCmd; on `up` they take + // effect through setupConfig(), here we apply them once before + // installing the service so the daemon picks them up on first run. + for _, c := range []*cobra.Command{installCmd, reconfigureCmd} { + c.Flags().StringVar(&connectionMode, connectionModeFlag, "", + "[Experimental] Peer connection mode: relay-forced, p2p, p2p-lazy, p2p-dynamic, or follow-server. "+ + "Overrides the server-pushed value when set. 
Use follow-server to clear a previously-set local override.") + c.Flags().Uint32Var(&relayTimeoutSecs, relayTimeoutFlag, 0, + "[Experimental] Relay-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default).") + c.Flags().Uint32Var(&p2pTimeoutSecs, p2pTimeoutFlag, 0, + "[Experimental] ICE-worker idle timeout in seconds. 0 = use server-pushed value. Only effective in p2p-dynamic mode.") + c.Flags().Uint32Var(&p2pRetryMaxSecs, p2pRetryMaxFlag, 0, + "[Experimental] Maximum ICE-failure-backoff interval in seconds. 0 = use server-pushed value (or built-in default 15 min).") + } + rootCmd.AddCommand(serviceCmd) } diff --git a/client/cmd/service_installer.go b/client/cmd/service_installer.go index 2d45fa063d8..449c910ff51 100644 --- a/client/cmd/service_installer.go +++ b/client/cmd/service_installer.go @@ -15,6 +15,7 @@ import ( "github.com/kardianos/service" "github.com/spf13/cobra" + "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/util" ) @@ -131,6 +132,12 @@ var installCmd = &cobra.Command{ cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) } + // Persist any profile-level connection-mode/timeout flags that + // were explicitly set so the daemon picks them up on first start. + if err := applyConnectionModeFlagsToProfile(cmd); err != nil { + cmd.PrintErrf("Warning: failed to persist connection-mode flags: %v\n", err) + } + svcConfig, err := createServiceConfigForInstall() if err != nil { return err @@ -157,6 +164,52 @@ var installCmd = &cobra.Command{ }, } +// applyConnectionModeFlagsToProfile writes the connection-mode + +// timeout flags into the active profile's config file so the daemon +// will use them on its next startup. Only fields whose flag was +// explicitly set are touched; missing flags leave the existing +// profile values intact. Used by install + reconfigure so headless +// deployments can pre-seed everything in a single command. 
+func applyConnectionModeFlagsToProfile(cmd *cobra.Command) error { + anyChanged := false + for _, name := range []string{connectionModeFlag, relayTimeoutFlag, p2pTimeoutFlag, p2pRetryMaxFlag} { + if f := cmd.Flag(name); f != nil && f.Changed { + anyChanged = true + break + } + } + if !anyChanged { + return nil + } + + cfgPath := profilemanager.DefaultConfigPath + if configPath != "" { + cfgPath = configPath + } + if cfgPath == "" { + return fmt.Errorf("default config path is not set on this platform; pass --config") + } + + ic := profilemanager.ConfigInput{ConfigPath: cfgPath} + if cmd.Flag(connectionModeFlag).Changed { + ic.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + ic.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + ic.P2pTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + ic.P2pRetryMaxSeconds = &p2pRetryMaxSecs + } + if _, err := profilemanager.UpdateOrCreateConfig(ic); err != nil { + return fmt.Errorf("write profile %s: %w", cfgPath, err) + } + cmd.Println("connection-mode/timeout flags persisted to profile:", cfgPath) + return nil +} + var uninstallCmd = &cobra.Command{ Use: "uninstall", Short: "uninstalls NetBird service from system", @@ -207,6 +260,10 @@ This command will temporarily stop the service, update its configuration, and re cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) } + if err := applyConnectionModeFlagsToProfile(cmd); err != nil { + cmd.PrintErrf("Warning: failed to persist connection-mode flags: %v\n", err) + } + wasRunning, err := isServiceRunning() if err != nil && !errors.Is(err, ErrGetServiceStatus) { return fmt.Errorf("check service status: %w", err) diff --git a/client/cmd/up.go b/client/cmd/up.go index f4136cb2343..cba3edddee9 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -439,6 +439,19 @@ func setupSetConfigReq(customDNSAddressConverted []byte, cmd *cobra.Command, pro req.LazyConnectionEnabled = 
&lazyConnEnabled } + if cmd.Flag(connectionModeFlag).Changed { + req.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + req.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + req.P2PTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + req.P2PRetryMaxSeconds = &p2pRetryMaxSecs + } + return &req } @@ -555,6 +568,19 @@ func setupConfig(customDNSAddressConverted []byte, cmd *cobra.Command, configFil if cmd.Flag(enableLazyConnectionFlag).Changed { ic.LazyConnectionEnabled = &lazyConnEnabled } + + if cmd.Flag(connectionModeFlag).Changed { + ic.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + ic.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + ic.P2pTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + ic.P2pRetryMaxSeconds = &p2pRetryMaxSecs + } return &ic, nil } @@ -669,6 +695,19 @@ func setupLoginRequest(providedSetupKey string, customDNSAddressConverted []byte if cmd.Flag(enableLazyConnectionFlag).Changed { loginRequest.LazyConnectionEnabled = &lazyConnEnabled } + + if cmd.Flag(connectionModeFlag).Changed { + loginRequest.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + loginRequest.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + loginRequest.P2PTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + loginRequest.P2PRetryMaxSeconds = &p2pRetryMaxSecs + } return &loginRequest, nil } diff --git a/client/internal/conn_mgr.go b/client/internal/conn_mgr.go index 112559132a1..c928d3ae0c8 100644 --- a/client/internal/conn_mgr.go +++ b/client/internal/conn_mgr.go @@ -14,6 +14,8 @@ import ( "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/peerstore" "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/connectionmode" + mgmProto 
"github.com/netbirdio/netbird/shared/management/proto" ) // ConnMgr coordinates both lazy connections (established on-demand) and permanent peer connections. @@ -28,9 +30,44 @@ type ConnMgr struct { peerStore *peerstore.Store statusRecorder *peer.Status iface lazyconn.WGIface - enabledLocally bool rosenpassEnabled bool + // Resolved values used to drive lifecycle decisions. Updated when + // the management server pushes a new PeerConfig. + mode connectionmode.Mode + relayTimeoutSecs uint32 + // Phase 2 (#5989): ICE-only inactivity timeout (seconds). Used in + // ModeP2PDynamic to teardown the ICE worker without affecting the + // relay tunnel. 0 = ICE never times out. + p2pTimeoutSecs uint32 + // Phase 3 (#5989): maximum seconds between P2P retry attempts. + // 0 means the daemon uses its built-in default. + p2pRetryMaxSecs uint32 + + // Raw inputs kept so we can re-resolve when server-pushed value changes. + envMode connectionmode.Mode + envRelayTimeout uint32 + cfgMode connectionmode.Mode + cfgRelayTimeout uint32 + cfgP2pTimeout uint32 + cfgP2pRetryMax uint32 + + // spMu protects all serverPushed* fields below. Written in + // UpdatedRemotePeerConfig (NetworkMap goroutine), read by + // ServerPushed*() accessors (daemon-RPC GetConfig goroutine). + spMu sync.RWMutex + + // serverPushedMode is the ConnectionMode value that was last received + // from the management server's PeerConfig (independent of any local + // env/cfg override). Updated in UpdatedRemotePeerConfig. Used by the + // Android UI to display "Follow server (currently: )" in the + // connection-mode override dropdown so users can see what they would + // inherit if they leave the override on "Follow server". 
+ serverPushedMode connectionmode.Mode + serverPushedRelayTimeoutSecs uint32 + serverPushedP2pTimeoutSecs uint32 + serverPushedP2pRetryMaxSecs uint32 + lazyConnMgr *manager.Manager wg sync.WaitGroup @@ -39,72 +76,260 @@ type ConnMgr struct { } func NewConnMgr(engineConfig *EngineConfig, statusRecorder *peer.Status, peerStore *peerstore.Store, iface lazyconn.WGIface) *ConnMgr { - e := &ConnMgr{ + envMode, envRelayTimeout := peer.ResolveModeFromEnv() + + // First-pass resolution without server input -- updated later when + // the first NetworkMap arrives via UpdatedRemotePeerConfig. + mode, relayTimeout, p2pTimeout, p2pRetryMax := resolveConnectionMode( + envMode, envRelayTimeout, + engineConfig.ConnectionMode, engineConfig.RelayTimeoutSeconds, + engineConfig.P2pTimeoutSeconds, + engineConfig.P2pRetryMaxSeconds, + nil, + ) + + return &ConnMgr{ peerStore: peerStore, statusRecorder: statusRecorder, iface: iface, rosenpassEnabled: engineConfig.RosenpassEnabled, + mode: mode, + relayTimeoutSecs: relayTimeout, + p2pTimeoutSecs: p2pTimeout, + p2pRetryMaxSecs: p2pRetryMax, + envMode: envMode, + envRelayTimeout: envRelayTimeout, + cfgMode: engineConfig.ConnectionMode, + cfgRelayTimeout: engineConfig.RelayTimeoutSeconds, + cfgP2pTimeout: engineConfig.P2pTimeoutSeconds, + cfgP2pRetryMax: engineConfig.P2pRetryMaxSeconds, + } +} + +// resolveConnectionMode applies the spec-section-4.1 precedence chain: +// 1. client env (already resolved by caller via peer.ResolveModeFromEnv) +// 2. client config (from profile, including the FollowServer sentinel) +// 3. server-pushed PeerConfig.ConnectionMode (with UNSPECIFIED -> +// legacy LazyConnectionEnabled fallback) +// +// Returns the resolved Mode, the resolved relay-timeout in seconds, and +// the resolved p2p-timeout in seconds. 0 for either timeout means the +// caller should use its built-in default. 
+func resolveConnectionMode( + envMode connectionmode.Mode, + envRelayTimeout uint32, + cfgMode connectionmode.Mode, + cfgRelayTimeout uint32, + cfgP2pTimeout uint32, + cfgP2pRetryMax uint32, + serverPC *mgmProto.PeerConfig, +) (connectionmode.Mode, uint32, uint32, uint32) { + mode := envMode + if mode == connectionmode.ModeUnspecified { + if cfgMode != connectionmode.ModeUnspecified && cfgMode != connectionmode.ModeFollowServer { + mode = cfgMode + } + } + if mode == connectionmode.ModeUnspecified { + if serverPC != nil { + serverMode := connectionmode.FromProto(serverPC.GetConnectionMode()) + if serverMode != connectionmode.ModeUnspecified { + mode = serverMode + } else { + mode = connectionmode.ResolveLegacyLazyBool(serverPC.GetLazyConnectionEnabled()) + } + } else { + mode = connectionmode.ModeP2P // safe default when nothing at all is known + } + } + + // Relay-timeout precedence (analog). + relay := envRelayTimeout + if relay == 0 { + relay = cfgRelayTimeout + } + if relay == 0 && serverPC != nil { + relay = serverPC.GetRelayTimeoutSeconds() + } + + // P2P-timeout precedence: client config wins over server push. No env + // var in Phase 2; reserved for Phase 3. + p2p := cfgP2pTimeout + if p2p == 0 && serverPC != nil { + p2p = serverPC.GetP2PTimeoutSeconds() } - if engineConfig.LazyConnectionEnabled || lazyconn.IsLazyConnEnabledByEnv() { - e.enabledLocally = true + + // P2pRetryMax resolution (analogous to p2p timeout): + // client-config wins over server-pushed value (0 = not set). + p2pRetryMax := cfgP2pRetryMax + if p2pRetryMax == 0 && serverPC != nil { + p2pRetryMax = serverPC.GetP2PRetryMaxSeconds() } - return e + + return mode, relay, p2p, p2pRetryMax } -// Start initializes the connection manager and starts the lazy connection manager if enabled by env var or cmd line option. +// Start initializes the connection manager. The lazy/dynamic connection +// manager is brought up immediately when the resolved Mode is P2PLazy +// or P2PDynamic. 
Other modes keep the manager dormant; it can still be +// activated later via UpdatedRemotePeerConfig. func (e *ConnMgr) Start(ctx context.Context) { if e.lazyConnMgr != nil { - log.Errorf("lazy connection manager is already started") + log.Errorf("lazy/dynamic connection manager is already started") return } - - if !e.enabledLocally { - log.Infof("lazy connection manager is disabled") + if !modeUsesLazyMgr(e.mode) { + log.Infof("lazy/dynamic connection manager is disabled (mode=%s)", e.mode) return } - if e.rosenpassEnabled { - log.Warnf("rosenpass connection manager is enabled, lazy connection manager will not be started") + log.Warnf("rosenpass enabled, lazy/dynamic connection manager will not be started") return } - e.initLazyManager(ctx) - e.statusRecorder.UpdateLazyConnection(true) + e.startModeSideEffects() } -// UpdatedRemoteFeatureFlag is called when the remote feature flag is updated. -// If enabled, it initializes the lazy connection manager and start it. Do not need to call Start() again. -// If disabled, then it closes the lazy connection manager and open the connections to all peers. -func (e *ConnMgr) UpdatedRemoteFeatureFlag(ctx context.Context, enabled bool) error { - // do not disable lazy connection manager if it was enabled by env var - if e.enabledLocally { - return nil +// modeUsesLazyMgr is true for the modes whose lifecycle is driven by the +// lazyconn.Manager (which now hosts the two-timer inactivity manager +// since Phase 2). Eager modes (p2p, relay-forced) do not need it. +func modeUsesLazyMgr(m connectionmode.Mode) bool { + return m == connectionmode.ModeP2PLazy || m == connectionmode.ModeP2PDynamic +} + +// startModeSideEffects flips the per-mode goroutines and status flags +// that need to follow a successful initLazyManager. Called by Start() +// and by the management-push transition path. 
+func (e *ConnMgr) startModeSideEffects() { + if e.mode == connectionmode.ModeP2PLazy { + e.statusRecorder.UpdateLazyConnection(true) } + if e.mode == connectionmode.ModeP2PDynamic { + e.wg.Add(1) + go func() { + defer e.wg.Done() + e.runDynamicInactivityLoop(e.lazyCtx) + }() + } +} - if enabled { - // if the lazy connection manager is already started, do not start it again - if e.lazyConnMgr != nil { - return nil +// runDynamicInactivityLoop reads from the two-timer inactivity channels +// exposed by the inactivity.Manager and dispatches per-peer teardown. +// +// ICEInactiveChan: detach the ICE worker for each listed peer; the +// relay tunnel is left running so traffic still flows. +// +// RelayInactiveChan: close the whole connection. The activity-detector +// will reopen it when the next outbound packet arrives. +// +// Only meaningful in p2p-dynamic mode; in p2p-lazy the iceTimeout is 0 +// and ICEInactiveChan never fires, so the loop is a passthrough. +func (e *ConnMgr) runDynamicInactivityLoop(ctx context.Context) { + if e.lazyConnMgr == nil { + return + } + im := e.lazyConnMgr.InactivityManager() + if im == nil { + return + } + log.Infof("p2p-dynamic inactivity loop started (iceTimeout=%ds, relayTimeout=%ds)", e.p2pTimeoutSecs, e.relayTimeoutSecs) + defer log.Infof("p2p-dynamic inactivity loop stopped") + for { + select { + case <-ctx.Done(): + return + case peers := <-im.ICEInactiveChan(): + for peerKey := range peers { + if err := e.DetachICEForPeer(peerKey); err != nil { + log.Warnf("DetachICEForPeer(%s): %v", peerKey, err) + } + } + case peers := <-im.RelayInactiveChan(): + for peerKey := range peers { + if conn, ok := e.peerStore.PeerConn(peerKey); ok { + conn.Log.Infof("relay-inactivity timeout, closing peer connection") + conn.Close(false) + } + } } + } +} - if e.rosenpassEnabled { - log.Infof("rosenpass connection manager is enabled, lazy connection manager will not be started") - return nil +// UpdatedRemotePeerConfig is called when the management 
server pushes a +// new PeerConfig. Re-resolves the effective mode through the precedence +// chain and starts/stops the lazy manager accordingly. +func (e *ConnMgr) UpdatedRemotePeerConfig(ctx context.Context, pc *mgmProto.PeerConfig) error { + // Capture the raw server-pushed values before resolution so the UI + // can surface them independently of any local override. + if pc != nil { + serverMode := connectionmode.FromProto(pc.GetConnectionMode()) + if serverMode == connectionmode.ModeUnspecified { + serverMode = connectionmode.ResolveLegacyLazyBool(pc.GetLazyConnectionEnabled()) } + e.spMu.Lock() + e.serverPushedMode = serverMode + e.serverPushedRelayTimeoutSecs = pc.GetRelayTimeoutSeconds() + e.serverPushedP2pTimeoutSecs = pc.GetP2PTimeoutSeconds() + e.serverPushedP2pRetryMaxSecs = pc.GetP2PRetryMaxSeconds() + e.spMu.Unlock() + } - log.Warnf("lazy connection manager is enabled by management feature flag") - e.initLazyManager(ctx) - e.statusRecorder.UpdateLazyConnection(true) - return e.addPeersToLazyConnManager() - } else { - if e.lazyConnMgr == nil { - return nil - } - log.Infof("lazy connection manager is disabled by management feature flag") + newMode, newRelay, newP2P, newP2pRetry := resolveConnectionMode( + e.envMode, e.envRelayTimeout, e.cfgMode, e.cfgRelayTimeout, + e.cfgP2pTimeout, e.cfgP2pRetryMax, pc, + ) + + if newMode == e.mode && newRelay == e.relayTimeoutSecs && + newP2P == e.p2pTimeoutSecs && newP2pRetry == e.p2pRetryMaxSecs { + return nil + } + prev := e.mode + e.mode = newMode + e.relayTimeoutSecs = newRelay + e.p2pTimeoutSecs = newP2P + e.p2pRetryMaxSecs = newP2pRetry + e.propagateP2pRetryMaxToConns() + + wasManaged := modeUsesLazyMgr(prev) + isManaged := modeUsesLazyMgr(newMode) + modeChanged := prev != newMode + + if modeChanged && wasManaged && !isManaged { + log.Infof("lazy/dynamic connection manager disabled by management push (mode=%s)", newMode) e.closeManager(ctx) e.statusRecorder.UpdateLazyConnection(false) return nil } + + if 
modeChanged && wasManaged && isManaged { + // Switching between lazy and dynamic at runtime: tear down the + // existing manager so initLazyManager picks up the new timeouts. + log.Infof("lazy/dynamic mode change %s -> %s, restarting manager", prev, newMode) + e.closeManager(ctx) + e.statusRecorder.UpdateLazyConnection(false) + } + + if isManaged && e.lazyConnMgr == nil { + if e.rosenpassEnabled { + log.Warnf("rosenpass enabled, ignoring lazy/dynamic mode push") + return nil + } + log.Infof("lazy/dynamic connection manager enabled by management push (mode=%s)", newMode) + e.initLazyManager(ctx) + e.startModeSideEffects() + return e.addPeersToLazyConnManager() + } + return nil +} + +// UpdatedRemoteFeatureFlag is the legacy entry point that only knows the +// boolean LazyConnectionEnabled field. Kept as a thin shim that builds a +// synthetic PeerConfig and delegates to UpdatedRemotePeerConfig. +// +// Deprecated: callers should switch to UpdatedRemotePeerConfig and pass +// the real PeerConfig so the new ConnectionMode + timeouts propagate. +func (e *ConnMgr) UpdatedRemoteFeatureFlag(ctx context.Context, enabled bool) error { + return e.UpdatedRemotePeerConfig(ctx, &mgmProto.PeerConfig{LazyConnectionEnabled: enabled}) } // UpdateRouteHAMap updates the route HA mappings in the lazy connection manager @@ -230,17 +455,89 @@ func (e *ConnMgr) ActivatePeer(ctx context.Context, conn *peer.Conn) { conn.Log.Errorf("failed to open connection: %v", err) } } + + // p2p-dynamic: re-attach ICE on EVERY signal trigger, not only on + // the lazy-manager's first activity edge. The runDynamicInactivityLoop + // path (DetachICEForPeer when iceTimeout fires) leaves the peer in an + // "inactivity-with-ICE-detached" sub-state that the lazy manager does + // not represent. 
Without this re-arm, subsequent remote OFFERs would + // reach handshaker.Listen() with iceListener==nil and be silently + // dropped, leaving the peer stuck on relay even though both sides + // are signaling normally. AttachICE is idempotent (no-op if listener + // already attached) and honors iceBackoff.IsSuspended() so the + // failure-backoff is not bypassed. + if e.mode == connectionmode.ModeP2PDynamic { + if err := conn.AttachICE(); err != nil { + conn.Log.Warnf("AttachICE on signal activity: %v", err) + } + } +} + +// deactivateAction selects what DeactivatePeer should do when the remote +// peer signals GO_IDLE. The dispatch is a pure function of the locally +// resolved connection mode. +type deactivateAction int + +const ( + deactivateNoop deactivateAction = iota + deactivateLazy + deactivateICE +) + +// deactivatePeerAction returns the per-mode deactivation rule. Eager +// modes (p2p, relay-forced, unspecified) ignore GO_IDLE because they +// are meant to keep tunnels always-on. p2p-lazy delegates to the lazy +// connection manager so the whole tunnel is torn down. p2p-dynamic +// detaches only the ICE worker so the relay tunnel stays up. +func (e *ConnMgr) deactivatePeerAction() deactivateAction { + switch e.mode { + case connectionmode.ModeP2PLazy: + return deactivateLazy + case connectionmode.ModeP2PDynamic: + return deactivateICE + default: + return deactivateNoop + } } -// DeactivatePeer deactivates a peer connection in the lazy connection manager. -// If locally the lazy connection is disabled, we force the peer connection open. +// DeactivatePeer is invoked when the remote peer signals GO_IDLE. The +// behavior is per-mode (see deactivatePeerAction). Phase 2 fix for the +// lazy/eager mismatch in #5989: previously this method silently no-op'd +// whenever the local manager was not in lazy mode, so a remote lazy +// peer's GO_IDLE was effectively dropped and the eager local end kept +// the peer awake. 
func (e *ConnMgr) DeactivatePeer(conn *peer.Conn) { - if !e.isStartedWithLazyMgr() { + switch e.deactivatePeerAction() { + case deactivateLazy: + if !e.isStartedWithLazyMgr() { + return + } + conn.Log.Infof("closing peer connection: remote peer initiated inactive, idle lazy state and sent GOAWAY") + e.lazyConnMgr.DeactivatePeer(conn.ConnID()) + case deactivateICE: + conn.Log.Infof("detaching ICE worker: remote peer signaled GO_IDLE (p2p-dynamic)") + if err := e.DetachICEForPeer(conn.GetKey()); err != nil { + conn.Log.Warnf("DetachICEForPeer failed: %v", err) + } + case deactivateNoop: + // Eager modes keep the tunnel up unconditionally. return } +} - conn.Log.Infof("closing peer connection: remote peer initiated inactive, idle lazy state and sent GOAWAY") - e.lazyConnMgr.DeactivatePeer(conn.ConnID()) +// DetachICEForPeer looks up the Conn for peerKey and tears down its +// ICE worker without touching the relay tunnel. Used by: +// - DeactivatePeer when the remote peer sends GO_IDLE (p2p-dynamic) +// - the inactivity manager when the iceTimeout elapses (wired in +// engine.go runDynamicInactivityLoop) +// +// Missing peers are not an error; they may have been removed concurrently. 
+func (e *ConnMgr) DetachICEForPeer(peerKey string) error { + conn, ok := e.peerStore.PeerConn(peerKey) + if !ok { + return nil + } + return conn.DetachICE() } func (e *ConnMgr) Close() { @@ -257,6 +554,12 @@ func (e *ConnMgr) initLazyManager(engineCtx context.Context) { cfg := manager.Config{ InactivityThreshold: inactivityThresholdEnv(), } + if e.relayTimeoutSecs > 0 { + cfg.RelayInactivityThreshold = time.Duration(e.relayTimeoutSecs) * time.Second + } + if e.mode == connectionmode.ModeP2PDynamic && e.p2pTimeoutSecs > 0 { + cfg.ICEInactivityThreshold = time.Duration(e.p2pTimeoutSecs) * time.Second + } e.lazyConnMgr = manager.NewManager(cfg, engineCtx, e.peerStore, e.iface) e.lazyCtx, e.lazyCtxCancel = context.WithCancel(engineCtx) @@ -268,6 +571,28 @@ func (e *ConnMgr) initLazyManager(engineCtx context.Context) { }() } +// propagateP2pRetryMaxToConns iterates all active Conn instances and +// updates their iceBackoff.SetMaxBackoff. Called when the server pushes +// a new value via UpdatedRemotePeerConfig. Phase 3 of #5989. +func (e *ConnMgr) propagateP2pRetryMaxToConns() { + const sentinelDisabled = ^uint32(0) + v := e.p2pRetryMaxSecs + var d time.Duration + switch v { + case sentinelDisabled: + d = 0 // user-explicit disable + case 0: + d = peer.DefaultP2PRetryMax // server NULL -> use daemon default + default: + d = time.Duration(v) * time.Second + } + for _, peerKey := range e.peerStore.PeersPubKey() { + if conn, ok := e.peerStore.PeerConn(peerKey); ok { + conn.SetIceBackoffMax(d) + } + } +} + func (e *ConnMgr) addPeersToLazyConnManager() error { peers := e.peerStore.PeersPubKey() lazyPeerCfgs := make([]lazyconn.PeerConfig, 0, len(peers)) @@ -309,6 +634,71 @@ func (e *ConnMgr) isStartedWithLazyMgr() bool { return e.lazyConnMgr != nil && e.lazyCtxCancel != nil } +// Mode returns the currently resolved connection mode. Used by the engine +// when constructing per-peer connections (Phase 1 forwards it into +// peer.ConnConfig in a follow-up commit). 
+func (e *ConnMgr) Mode() connectionmode.Mode { + return e.mode +} + +// RelayTimeout returns the resolved relay-worker idle timeout in seconds. +func (e *ConnMgr) RelayTimeout() uint32 { + return e.relayTimeoutSecs +} + +// P2pRetryMax returns the resolved cap in seconds for the ICE-failure +// backoff schedule. Wire-format sentinel uint32-max means "user-explicit +// disable"; callers must translate that to 0. Phase 3 of #5989. +func (e *ConnMgr) P2pRetryMax() uint32 { + return e.p2pRetryMaxSecs +} + +// ServerPushedMode returns the connection mode the management server +// most recently pushed via PeerConfig (independent of any local env +// or config override). Returns ModeUnspecified if no PeerConfig has +// been received yet. Used by the Android UI to display "Follow server +// (currently: )" in the override dropdown. +func (e *ConnMgr) ServerPushedMode() connectionmode.Mode { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedMode +} + +// ServerPushedRelayTimeoutSecs returns the relay-worker idle-timeout +// (seconds) most recently pushed by the management server, or 0 if no +// PeerConfig has been received. Used by the Android UI as a hint in +// the override field. +func (e *ConnMgr) ServerPushedRelayTimeoutSecs() uint32 { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedRelayTimeoutSecs +} + +// ServerPushedP2pTimeoutSecs returns the ICE-only inactivity timeout +// (seconds) most recently pushed by the management server. Only +// meaningful in p2p-dynamic mode. +func (e *ConnMgr) ServerPushedP2pTimeoutSecs() uint32 { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedP2pTimeoutSecs +} + +// ServerPushedP2pRetryMaxSecs returns the ICE-failure backoff cap +// (seconds) most recently pushed by the management server. 
When the +// server has not pushed a value (Phase 1 management servers do not +// know about this field yet) the built-in DefaultP2PRetryMax is +// returned so the Android UI hint shows what value the daemon is +// actually using as fallback. +func (e *ConnMgr) ServerPushedP2pRetryMaxSecs() uint32 { + e.spMu.RLock() + v := e.serverPushedP2pRetryMaxSecs + e.spMu.RUnlock() + if v > 0 { + return v + } + return uint32(peer.DefaultP2PRetryMax / time.Second) +} + func inactivityThresholdEnv() *time.Duration { envValue := os.Getenv(lazyconn.EnvInactivityThreshold) if envValue == "" { diff --git a/client/internal/conn_mgr_test.go b/client/internal/conn_mgr_test.go new file mode 100644 index 00000000000..21f0c93d523 --- /dev/null +++ b/client/internal/conn_mgr_test.go @@ -0,0 +1,221 @@ +package internal + +import ( + "testing" + + "github.com/netbirdio/netbird/client/internal/peerstore" + "github.com/netbirdio/netbird/shared/connectionmode" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestResolveConnectionMode(t *testing.T) { + cases := []struct { + name string + envMode connectionmode.Mode + envTimeout uint32 + cfgMode connectionmode.Mode + cfgRelayTimeout uint32 + cfgP2pTimeout uint32 + serverPC *mgmProto.PeerConfig + wantMode connectionmode.Mode + wantRelay uint32 + wantP2P uint32 + }{ + { + name: "all unspecified, server says legacy false -> P2P", + serverPC: &mgmProto.PeerConfig{LazyConnectionEnabled: false}, + wantMode: connectionmode.ModeP2P, + }, + { + name: "all unspecified, server says legacy true -> P2P_LAZY", + serverPC: &mgmProto.PeerConfig{LazyConnectionEnabled: true}, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "server pushes new enum -> wins over legacy bool", + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + LazyConnectionEnabled: false, + }, + wantMode: connectionmode.ModeRelayForced, + }, + { + name: "client config overrides server", + cfgMode: 
connectionmode.ModeP2PLazy, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + }, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "follow-server in client config clears local override", + cfgMode: connectionmode.ModeFollowServer, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + }, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "env var beats client config", + envMode: connectionmode.ModeRelayForced, + cfgMode: connectionmode.ModeP2PLazy, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + }, + wantMode: connectionmode.ModeRelayForced, + }, + { + name: "env timeout beats server timeout", + envTimeout: 42, + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 100}, + wantMode: connectionmode.ModeP2P, + wantRelay: 42, + }, + { + name: "client config timeout beats server", + cfgRelayTimeout: 50, + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 200}, + wantMode: connectionmode.ModeP2P, + wantRelay: 50, + }, + { + name: "no env, no client, only server timeout", + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 300}, + wantMode: connectionmode.ModeP2P, + wantRelay: 300, + }, + { + name: "nil serverPC defaults to P2P", + serverPC: nil, + wantMode: connectionmode.ModeP2P, + }, + { + name: "p2p-dynamic with server-pushed timeouts", + serverPC: &mgmProto.PeerConfig{ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, P2PTimeoutSeconds: 10800, RelayTimeoutSeconds: 86400}, + wantMode: connectionmode.ModeP2PDynamic, wantRelay: 86400, wantP2P: 10800, + }, + { + name: "client config p2p-timeout beats server", + cfgP2pTimeout: 555, + serverPC: &mgmProto.PeerConfig{ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, P2PTimeoutSeconds: 9999}, + wantMode: connectionmode.ModeP2PDynamic, wantP2P: 555, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + 
gotMode, gotRelay, gotP2P, _ := resolveConnectionMode(c.envMode, c.envTimeout, c.cfgMode, c.cfgRelayTimeout, c.cfgP2pTimeout, 0, c.serverPC) + if gotMode != c.wantMode { + t.Errorf("mode = %v, want %v", gotMode, c.wantMode) + } + if gotRelay != c.wantRelay { + t.Errorf("relay-timeout = %v, want %v", gotRelay, c.wantRelay) + } + if gotP2P != c.wantP2P { + t.Errorf("p2p-timeout = %v, want %v", gotP2P, c.wantP2P) + } + }) + } +} + +func TestResolveConnectionMode_P2pRetryMax_NotSet(t *testing.T) { + // serverPC has 0 (= "not set") -> result is 0, daemon will use default + mode, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, 0, + &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + P2PRetryMaxSeconds: 0, + }, + ) + if mode != connectionmode.ModeP2PDynamic { + t.Errorf("expected p2p-dynamic, got %v", mode) + } + if retryMax != 0 { + t.Errorf("server-pushed 0 should pass through as 0, got %d", retryMax) + } +} + +func TestResolveConnectionMode_P2pRetryMax_ServerSet(t *testing.T) { + mode, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, 0, + &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + P2PRetryMaxSeconds: 600, + }, + ) + if mode != connectionmode.ModeP2PDynamic { + t.Fatalf("expected p2p-dynamic, got %v", mode) + } + if retryMax != 600 { + t.Errorf("server-pushed 600 should win, got %d", retryMax) + } +} + +func TestResolveConnectionMode_P2pRetryMax_ClientCfgWins(t *testing.T) { + _, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, + 300, // cfgP2pRetryMax (client-side override) + &mgmProto.PeerConfig{ + P2PRetryMaxSeconds: 600, + }, + ) + if retryMax != 300 { + t.Errorf("client cfg should override server push, got %d", retryMax) + } +} + +// 
TestConnMgr_DetachICEForPeer_NotFound verifies that detaching ICE +// for a peer not in the store is a no-op (no error). The lookup miss +// can happen if a peer is removed concurrently with a GO_IDLE signal +// or an inactivity-manager fire. +func TestConnMgr_DetachICEForPeer_NotFound(t *testing.T) { + mgr := &ConnMgr{peerStore: peerstore.NewConnStore()} + + if err := mgr.DetachICEForPeer("unknown-peer-key"); err != nil { + t.Fatalf("DetachICEForPeer for unknown peer should be no-op, got %v", err) + } +} + +// TestConnMgr_deactivatePeerAction verifies the per-mode dispatch rule: +// p2p-dynamic detaches ICE, p2p-lazy delegates to the lazy manager, +// eager modes (p2p, relay-forced) are silent no-ops. This is the core +// fix for the lazy/eager mismatch (Phase 2 #5989). +func TestConnMgr_deactivatePeerAction(t *testing.T) { + cases := []struct { + mode connectionmode.Mode + want deactivateAction + }{ + {connectionmode.ModeP2P, deactivateNoop}, + {connectionmode.ModeRelayForced, deactivateNoop}, + {connectionmode.ModeUnspecified, deactivateNoop}, + {connectionmode.ModeP2PLazy, deactivateLazy}, + {connectionmode.ModeP2PDynamic, deactivateICE}, + } + for _, c := range cases { + t.Run(c.mode.String(), func(t *testing.T) { + mgr := &ConnMgr{mode: c.mode} + if got := mgr.deactivatePeerAction(); got != c.want { + t.Errorf("mode=%v action=%v want=%v", c.mode, got, c.want) + } + }) + } +} + +func TestConnMgr_ServerPushedFieldsAreRaceSafe(t *testing.T) { + cm := &ConnMgr{} + done := make(chan struct{}) + go func() { + for i := 0; i < 1000; i++ { + cm.spMu.Lock() + cm.serverPushedRelayTimeoutSecs = uint32(i) + cm.spMu.Unlock() + } + close(done) + }() + for i := 0; i < 1000; i++ { + _ = cm.ServerPushedRelayTimeoutSecs() + } + <-done +} diff --git a/client/internal/connect.go b/client/internal/connect.go index 72e096a80a1..87768208ac1 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -25,6 +25,7 @@ import ( 
"github.com/netbirdio/netbird/client/internal/listener" "github.com/netbirdio/netbird/client/internal/metrics" "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/statemanager" "github.com/netbirdio/netbird/client/internal/stdnet" @@ -566,6 +567,11 @@ func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConf LazyConnectionEnabled: config.LazyConnectionEnabled, + ConnectionMode: parseConnectionMode(config.ConnectionMode), + RelayTimeoutSeconds: config.RelayTimeoutSeconds, + P2pTimeoutSeconds: config.P2pTimeoutSeconds, + P2pRetryMaxSeconds: config.P2pRetryMaxSeconds, + MTU: selectMTU(config.MTU, peerConfig.Mtu), LogPath: logPath, @@ -695,3 +701,16 @@ func closeConnWithLog(conn *net.UDPConn) { log.Warnf("closing the testing port %d took %s. Usually it is safe to ignore, but continuous warnings may indicate a problem.", conn.LocalAddr().(*net.UDPAddr).Port, time.Since(startClosing)) } } + +// parseConnectionMode is a tolerant wrapper used by the EngineConfig builder. +// An invalid string in the persisted profile (e.g. left over from a +// downgrade-then-upgrade cycle) is logged and treated as Unspecified so the +// daemon falls through to env / server resolution rather than panicking. 
+func parseConnectionMode(s string) connectionmode.Mode { + m, err := connectionmode.ParseString(s) + if err != nil { + log.Warnf("ignoring invalid connection_mode %q in profile config: %v", s, err) + return connectionmode.ModeUnspecified + } + return m +} diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 0a12a5326e3..5679a5b97df 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -644,6 +644,12 @@ func (g *BundleGenerator) addCommonConfigFields(configContent *strings.Builder) configContent.WriteString(fmt.Sprintf("LazyConnectionEnabled: %v\n", g.internalConfig.LazyConnectionEnabled)) configContent.WriteString(fmt.Sprintf("MTU: %d\n", g.internalConfig.MTU)) + + // Phase 1+2+3 (#5989) connection-mode resolution + lifecycle timers. + configContent.WriteString(fmt.Sprintf("ConnectionMode: %s\n", g.internalConfig.ConnectionMode)) + configContent.WriteString(fmt.Sprintf("RelayTimeoutSeconds: %d\n", g.internalConfig.RelayTimeoutSeconds)) + configContent.WriteString(fmt.Sprintf("P2pTimeoutSeconds: %d\n", g.internalConfig.P2pTimeoutSeconds)) + configContent.WriteString(fmt.Sprintf("P2pRetryMaxSeconds: %d\n", g.internalConfig.P2pRetryMaxSeconds)) } func (g *BundleGenerator) addProf() (err error) { diff --git a/client/internal/engine.go b/client/internal/engine.go index 7f19e2d2876..1317867bafc 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -61,6 +61,7 @@ import ( "github.com/netbirdio/netbird/client/system" nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/connectionmode" mgm "github.com/netbirdio/netbird/shared/management/client" "github.com/netbirdio/netbird/shared/management/domain" mgmProto "github.com/netbirdio/netbird/shared/management/proto" @@ -137,6 +138,26 @@ type EngineConfig struct { LazyConnectionEnabled bool + // ConnectionMode is the resolved peer-connection mode for this daemon + // session. 
ModeUnspecified means "fall back to LazyConnectionEnabled". + // Set by the caller of NewEngine; usually populated from + // profilemanager.Config.ConnectionMode in connect.go. + ConnectionMode connectionmode.Mode + + // RelayTimeoutSeconds, when > 0, overrides the server-pushed relay + // timeout. 0 means "follow server-pushed value". + RelayTimeoutSeconds uint32 + + // P2pTimeoutSeconds, when > 0, overrides the server-pushed p2p timeout. + // 0 means "follow server-pushed value". Reserved for Phase 2 -- has no + // effect in Phase 1. + P2pTimeoutSeconds uint32 + + // P2pRetryMaxSeconds, when > 0, overrides the server-pushed + // p2p_retry_max_seconds. 0 = use server-pushed value (or built-in + // default 15 min). Phase 3 of #5989. + P2pRetryMaxSeconds uint32 + MTU uint16 // for debug bundle generation @@ -294,6 +315,17 @@ func NewEngine( return engine } +// ConnMgr returns the engine's ConnMgr or nil if the engine has not been +// started yet (or has already shut down). Used by the Android UI to query +// the server-pushed connection mode for the dropdown's "Follow server" +// label. 
+func (e *Engine) ConnMgr() *ConnMgr { + if e == nil { + return nil + } + return e.connMgr +} + func (e *Engine) Stop() error { if e == nil { // this seems to be a very odd case but there was the possibility if the netbird down command comes before the engine is fully started @@ -575,7 +607,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.connMgr.Start(e.ctx) e.srWatcher = guard.NewSRWatcher(e.signal, e.relayManager, e.mobileDep.IFaceDiscover, iceCfg) - e.srWatcher.Start(peer.IsForceRelayed()) + e.srWatcher.Start(peer.IsForceRelayed()) //nolint:staticcheck // intentionally retained for Phase-1 backwards compat e.receiveSignalEvents() e.receiveManagementEvents() @@ -1231,8 +1263,8 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { return nil } - if err := e.connMgr.UpdatedRemoteFeatureFlag(e.ctx, networkMap.GetPeerConfig().GetLazyConnectionEnabled()); err != nil { - log.Errorf("failed to update lazy connection feature flag: %v", err) + if err := e.connMgr.UpdatedRemotePeerConfig(e.ctx, networkMap.GetPeerConfig()); err != nil { + log.Errorf("failed to update connection mode from PeerConfig: %v", err) } if e.firewall != nil { @@ -1560,7 +1592,9 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV Addr: e.getRosenpassAddr(), PermissiveMode: e.config.RosenpassPermissive, }, - ICEConfig: e.createICEConfig(), + ICEConfig: e.createICEConfig(), + Mode: e.connMgr.Mode(), + P2pRetryMaxSeconds: e.connMgr.P2pRetryMax(), } serviceDependencies := peer.ServiceDependencies{ diff --git a/client/internal/lazyconn/env.go b/client/internal/lazyconn/env.go index 649d1cd65de..cfdcc67d61d 100644 --- a/client/internal/lazyconn/env.go +++ b/client/internal/lazyconn/env.go @@ -12,6 +12,11 @@ const ( EnvInactivityThreshold = "NB_LAZY_CONN_INACTIVITY_THRESHOLD" ) +// IsLazyConnEnabledByEnv reads NB_ENABLE_EXPERIMENTAL_LAZY_CONN. 
+// +// Deprecated: use peer.ResolveModeFromEnv() -- kept here to not break +// existing callers in conn_mgr.go during the Phase-1 refactor; will be +// removed once all call sites use the new resolver. func IsLazyConnEnabledByEnv() bool { val := os.Getenv(EnvEnableLazyConn) if val == "" { diff --git a/client/internal/lazyconn/inactivity/manager.go b/client/internal/lazyconn/inactivity/manager.go index 0120f443049..5efbe70e10f 100644 --- a/client/internal/lazyconn/inactivity/manager.go +++ b/client/internal/lazyconn/inactivity/manager.go @@ -22,30 +22,89 @@ type WgInterface interface { LastActivities() map[string]monotime.Time } +// Manager watches per-peer activity timestamps from the WireGuard +// interface and notifies via channels when peers cross inactivity +// thresholds. +// +// Phase 2 (#5989) introduced TWO independent thresholds per peer: +// - iceTimeout fires the iceInactiveChan (consumer detaches the ICE +// worker but keeps the relay-tunnel up). +// - relayTimeout fires the relayInactiveChan (consumer tears down +// the whole connection). +// +// Threshold == 0 disables that channel for all peers (the corresponding +// teardown never fires). Phase-1 p2p-lazy is expressed as +// iceTimeout=0 + relayTimeout=X; the legacy InactivePeersChan is the +// same as RelayInactiveChan for backwards compat. type Manager struct { - inactivePeersChan chan map[string]struct{} + iface WgInterface - iface WgInterface - interestedPeers map[string]*lazyconn.PeerConfig + // Two-timer thresholds (Phase 2). Both 0 = manager is effectively + // inert (peers register but no channel ever fires). + iceTimeout time.Duration + relayTimeout time.Duration + + interestedPeers map[string]*lazyconn.PeerConfig + + iceInactiveChan chan map[string]struct{} + relayInactiveChan chan map[string]struct{} + + // inactivityThreshold + inactivePeersChan are kept for the + // Phase-1 NewManager API. Internally they alias to the relay + // timeout / channel. 
inactivityThreshold time.Duration + inactivePeersChan chan map[string]struct{} } +// NewManager is the Phase-1 single-timer constructor. Pass a *time.Duration +// to override the default DefaultInactivityThreshold; nil uses the default. +// +// Deprecated: use NewManagerWithTwoTimers. NewManager remains the entry +// point for callers that haven't been migrated; it constructs a manager +// with iceTimeout=0 (= ICE always-on, p2p-lazy semantics). func NewManager(iface WgInterface, configuredThreshold *time.Duration) *Manager { - inactivityThreshold, err := validateInactivityThreshold(configuredThreshold) + threshold, err := validateInactivityThreshold(configuredThreshold) if err != nil { - inactivityThreshold = DefaultInactivityThreshold + threshold = DefaultInactivityThreshold log.Warnf("invalid inactivity threshold configured: %v, using default: %v", err, DefaultInactivityThreshold) } - log.Infof("inactivity threshold configured: %v", inactivityThreshold) + log.Infof("inactivity threshold configured: %v", threshold) + return newManager(iface, 0, threshold) +} + +// NewManagerWithTwoTimers is the Phase-2 constructor. Pass 0 for either +// timeout to disable that teardown path. Both 0 leaves the manager +// running but inert (no channel ever fires) -- used by p2p / relay-forced +// modes that don't tear down workers. 
+func NewManagerWithTwoTimers(iface WgInterface, iceTimeout, relayTimeout time.Duration) *Manager { + if iceTimeout > 0 { + log.Infof("ICE inactivity timeout: %v", iceTimeout) + } + if relayTimeout > 0 { + log.Infof("relay inactivity timeout: %v", relayTimeout) + } + return newManager(iface, iceTimeout, relayTimeout) +} + +func newManager(iface WgInterface, iceTimeout, relayTimeout time.Duration) *Manager { + relayCh := make(chan map[string]struct{}, 1) return &Manager{ - inactivePeersChan: make(chan map[string]struct{}, 1), iface: iface, + iceTimeout: iceTimeout, + relayTimeout: relayTimeout, interestedPeers: make(map[string]*lazyconn.PeerConfig), - inactivityThreshold: inactivityThreshold, + iceInactiveChan: make(chan map[string]struct{}, 1), + relayInactiveChan: relayCh, + inactivityThreshold: relayTimeout, + inactivePeersChan: relayCh, // Phase-1 alias: same channel as relayInactiveChan } } +// InactivePeersChan is the Phase-1 channel for whole-tunnel teardown. +// In the Phase-2 internal model this is the same channel as +// RelayInactiveChan -- existing callers (engine.go p2p-lazy path) keep +// working unchanged. func (m *Manager) InactivePeersChan() chan map[string]struct{} { if m == nil { // return a nil channel that blocks forever @@ -55,6 +114,26 @@ func (m *Manager) InactivePeersChan() chan map[string]struct{} { return m.inactivePeersChan } +// ICEInactiveChan returns the channel that signals ICE-worker-only +// inactivity per peer (consumer typically calls Conn.DetachICE). +// Always returns a valid channel; if iceTimeout is 0, the channel +// just never fires. +func (m *Manager) ICEInactiveChan() chan map[string]struct{} { + if m == nil { + return nil + } + return m.iceInactiveChan +} + +// RelayInactiveChan returns the channel that signals relay-worker +// (and thus whole-tunnel) inactivity per peer. 
+func (m *Manager) RelayInactiveChan() chan map[string]struct{} { + if m == nil { + return nil + } + return m.relayInactiveChan +} + func (m *Manager) AddPeer(peerCfg *lazyconn.PeerConfig) { if m == nil { return @@ -95,24 +174,25 @@ func (m *Manager) Start(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C(): - idlePeers, err := m.checkStats() + iceIdle, relayIdle, err := m.checkStats() if err != nil { log.Errorf("error checking stats: %v", err) return } - if len(idlePeers) == 0 { - continue + if len(iceIdle) > 0 { + m.notifyChan(ctx, m.iceInactiveChan, iceIdle) + } + if len(relayIdle) > 0 { + m.notifyChan(ctx, m.relayInactiveChan, relayIdle) } - - m.notifyInactivePeers(ctx, idlePeers) } } } -func (m *Manager) notifyInactivePeers(ctx context.Context, inactivePeers map[string]struct{}) { +func (m *Manager) notifyChan(ctx context.Context, ch chan map[string]struct{}, peers map[string]struct{}) { select { - case m.inactivePeersChan <- inactivePeers: + case ch <- peers: case <-ctx.Done(): return default: @@ -120,10 +200,24 @@ func (m *Manager) notifyInactivePeers(ctx context.Context, inactivePeers map[str } } -func (m *Manager) checkStats() (map[string]struct{}, error) { +// checkStats walks the per-peer activity-since values and groups peers +// into two sets: +// - iceIdle: peers idle longer than iceTimeout (only populated when +// iceTimeout > 0; otherwise this set is always empty) +// - relayIdle: peers idle longer than relayTimeout (only populated +// when relayTimeout > 0) +// +// Both sets are returned independently so consumers can act on each +// without coupling. A peer that has crossed both thresholds appears in +// both sets and the consumer is expected to handle them in order +// (first DetachICE on the iceIdle set, then full Close on the relayIdle +// set; the order is fine because Close on a peer where ICE is already +// detached is still correct). 
+func (m *Manager) checkStats() (iceIdle, relayIdle map[string]struct{}, err error) { lastActivities := m.iface.LastActivities() - idlePeers := make(map[string]struct{}) + iceIdle = make(map[string]struct{}) + relayIdle = make(map[string]struct{}) checkTime := time.Now() for peerID, peerCfg := range m.interestedPeers { @@ -135,13 +229,18 @@ func (m *Manager) checkStats() (map[string]struct{}, error) { } since := monotime.Since(lastActive) - if since > m.inactivityThreshold { - peerCfg.Log.Infof("peer is inactive since time: %s", checkTime.Add(-since).String()) - idlePeers[peerID] = struct{}{} + + if m.iceTimeout > 0 && since > m.iceTimeout { + peerCfg.Log.Debugf("peer ICE idle since: %s", checkTime.Add(-since).String()) + iceIdle[peerID] = struct{}{} + } + if m.relayTimeout > 0 && since > m.relayTimeout { + peerCfg.Log.Infof("peer relay idle since: %s", checkTime.Add(-since).String()) + relayIdle[peerID] = struct{}{} } } - return idlePeers, nil + return iceIdle, relayIdle, nil } func validateInactivityThreshold(configuredThreshold *time.Duration) (time.Duration, error) { diff --git a/client/internal/lazyconn/inactivity/manager_test.go b/client/internal/lazyconn/inactivity/manager_test.go index 10b4ef1ebb4..ce32cf93b7a 100644 --- a/client/internal/lazyconn/inactivity/manager_test.go +++ b/client/internal/lazyconn/inactivity/manager_test.go @@ -112,3 +112,260 @@ func (f *fakeTickerMock) C() <-chan time.Time { } func (f *fakeTickerMock) Stop() {} + +// --- Phase 2 (#5989) two-timer tests --- + +// makePeerCfg is a test helper for building a minimal PeerConfig with logger. +func makePeerCfg(peerID string) *lazyconn.PeerConfig { + return &lazyconn.PeerConfig{ + PublicKey: peerID, + Log: log.WithField("peer", peerID), + } +} + +// pastActivity returns a monotime.Time corresponding to (now - d). 
+func pastActivity(d time.Duration) monotime.Time { + return monotime.Time(int64(monotime.Now()) - int64(d)) +} + +func TestTwoTimers_OnlyICEFires(t *testing.T) { + peerID := "peer1" + + // Peer idle for 6 minutes: above iceTimeout (5m), below relayTimeout (24h). + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(6 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 24*time.Hour) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.ICEInactiveChan(): + assert.Contains(t, peers, peerID, "expected peerID on ICE channel") + case <-time.After(1 * time.Second): + t.Fatal("expected ICE-inactive event, none received") + } + + // Relay channel must NOT fire. + select { + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel should not fire when only iceTimeout exceeded") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_BothFire(t *testing.T) { + peerID := "peer1" + + // Peer idle for 25h: above both iceTimeout (5m) and relayTimeout (24h). 
+ wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(25 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 24*time.Hour) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + gotICE := false + gotRelay := false + deadline := time.After(1 * time.Second) + for !gotICE || !gotRelay { + select { + case peers := <-manager.ICEInactiveChan(): + if _, ok := peers[peerID]; ok { + gotICE = true + } + case peers := <-manager.RelayInactiveChan(): + if _, ok := peers[peerID]; ok { + gotRelay = true + } + case <-deadline: + t.Fatalf("timeout waiting for both channels (gotICE=%v, gotRelay=%v)", gotICE, gotRelay) + } + } +} + +func TestTwoTimers_ICEDisabled(t *testing.T) { + peerID := "peer1" + + // iceTimeout=0 (disabled) + relayTimeout=10m, peer idle 11m -> only relay fires. + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(11 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 0, 10*time.Minute) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.RelayInactiveChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("relay channel should fire when relayTimeout exceeded") + } + + // ICE channel must never fire because iceTimeout=0. 
+ select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel should NEVER fire when iceTimeout=0") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_RelayDisabled(t *testing.T) { + peerID := "peer1" + + // iceTimeout=5m + relayTimeout=0, peer idle 6m -> only ICE fires. + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(6 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 0) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.ICEInactiveChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("ICE channel should fire when iceTimeout exceeded") + } + + // Relay channel must never fire because relayTimeout=0. + select { + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel should NEVER fire when relayTimeout=0") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_BothDisabled(t *testing.T) { + peerID := "peer1" + + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(99 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 0, 0) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + // Neither channel should fire. 
+ select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel must not fire when both disabled") + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel must not fire when both disabled") + case <-time.After(300 * time.Millisecond): + // expected + } +} + +// TestPhase1_LazyEquivalence verifies that the legacy NewManager constructor +// behaves identically to the Phase-1 single-timer code: peers cross the +// (single) inactivityThreshold and appear on InactivePeersChan, ICE +// channel never fires. +func TestPhase1_LazyEquivalence(t *testing.T) { + peerID := "peer1" + + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(20 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + // Phase-1 entry point with default threshold (15m). + manager := NewManager(wgMock, nil) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + // InactivePeersChan (Phase-1 alias of RelayInactiveChan) must fire. + select { + case peers := <-manager.InactivePeersChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("Phase-1 InactivePeersChan must fire (= RelayInactiveChan in Phase 2)") + } + + // ICE channel must NEVER fire from Phase-1 entry point (iceTimeout=0). + select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel must not fire in Phase-1 NewManager mode") + case <-time.After(200 * time.Millisecond): + // expected + } +} diff --git a/client/internal/lazyconn/manager/manager.go b/client/internal/lazyconn/manager/manager.go index fc47bda39d5..c1c4be003d8 100644 --- a/client/internal/lazyconn/manager/manager.go +++ b/client/internal/lazyconn/manager/manager.go @@ -28,7 +28,31 @@ type managedPeer struct { } type Config struct { + // Phase-1 single-timer field. 
Kept for compatibility; treated as
+	// RelayInactivityThreshold when the new (Phase-2) fields are zero.
+	//
+	// Deprecated: use ICEInactivityThreshold and RelayInactivityThreshold.
 	InactivityThreshold *time.Duration
+
+	// ICEInactivityThreshold is the per-peer ICE-worker idle timeout
+	// (Phase 2 / #5989). 0 = ICE always-on (= p2p-lazy semantics, where
+	// the whole tunnel goes idle but ICE is never torn down separately).
+	ICEInactivityThreshold time.Duration
+
+	// RelayInactivityThreshold is the per-peer relay-worker idle timeout
+	// (Phase 2). 0 = relay always-on.
+	RelayInactivityThreshold time.Duration
+}
+
+// resolvedTimeouts returns the effective (ICE, Relay) timeouts. If only
+// the deprecated InactivityThreshold field is set, it maps onto the
+// relay timeout for Phase-1 p2p-lazy semantics.
+func (c Config) resolvedTimeouts() (iceTimeout, relayTimeout time.Duration) {
+	relay := c.RelayInactivityThreshold
+	if relay == 0 && c.InactivityThreshold != nil {
+		relay = *c.InactivityThreshold
+	}
+	return c.ICEInactivityThreshold, relay
+}
 
 // Manager manages lazy connections
@@ -76,7 +100,13 @@ func NewManager(config Config, engineCtx context.Context, peerStore *peerstore.S
 	}
 
 	if wgIface.IsUserspaceBind() {
-		m.inactivityManager = inactivity.NewManager(wgIface, config.InactivityThreshold)
+		iceTO, relayTO := config.resolvedTimeouts()
+		if iceTO == 0 && relayTO == 0 {
+			// Phase 1 / single-timer fallback when caller hasn't migrated. 
+ m.inactivityManager = inactivity.NewManager(wgIface, config.InactivityThreshold) //nolint:staticcheck // intentional Phase-1 single-timer fallback + } else { + m.inactivityManager = inactivity.NewManagerWithTwoTimers(wgIface, iceTO, relayTO) + } } else { log.Warnf("inactivity manager not supported for kernel mode, wait for remote peer to close the connection") } @@ -84,6 +114,18 @@ func NewManager(config Config, engineCtx context.Context, peerStore *peerstore.S return m } +// InactivityManager exposes the underlying inactivity.Manager so the +// engine / conn_mgr can subscribe to ICEInactiveChan / RelayInactiveChan +// in the p2p-dynamic mode lifecycle. Returns nil if the manager runs in +// kernel-bind mode (no inactivity tracking) or if the manager itself is +// nil (defensive). +func (m *Manager) InactivityManager() *inactivity.Manager { + if m == nil { + return nil + } + return m.inactivityManager +} + // UpdateRouteHAMap updates the HA group mappings for routes // This should be called when route configuration changes func (m *Manager) UpdateRouteHAMap(haMap route.HAMap) { diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 1e416bfe707..305eb071356 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/client/iface/configurer" "github.com/netbirdio/netbird/client/iface/wgproxy" "github.com/netbirdio/netbird/client/internal/metrics" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/internal/peer/conntype" "github.com/netbirdio/netbird/client/internal/peer/dispatcher" "github.com/netbirdio/netbird/client/internal/peer/guard" @@ -86,11 +87,24 @@ type ConnConfig struct { // ICEConfig ICE protocol configuration ICEConfig icemaker.Config + + // Mode is the resolved connection mode for this peer (forwarded + // from the engine, which got it from the conn_mgr precedence chain). 
+ // Phase 1 uses it to pick the skip-ICE branch when ModeRelayForced. + Mode connectionmode.Mode + + // P2pRetryMaxSeconds is the cap for the ICE-failure backoff schedule + // in p2p-dynamic mode. 0 = use built-in default (DefaultP2PRetryMax). + // Wire-format sentinel uint32-max (= ^uint32(0)) means "user-explicit + // disable", which the resolver translates to time.Duration(0) at + // engine.go before passing it here. Phase 3 of #5989. + P2pRetryMaxSeconds uint32 } type Conn struct { Log *log.Entry mu sync.Mutex + iceBackoff *iceBackoffState ctx context.Context ctxCancel context.CancelFunc config ConnConfig @@ -185,8 +199,24 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager) - forceRelay := IsForceRelayed() - if !forceRelay { + // Phase 3: initialize per-peer ICE-failure backoff. The cap comes + // from the resolved P2pRetryMaxSeconds. 0 means "use built-in default". + backoffCap := time.Duration(conn.config.P2pRetryMaxSeconds) * time.Second + if backoffCap == 0 { + backoffCap = DefaultP2PRetryMax + } + if conn.iceBackoff == nil { + conn.iceBackoff = newIceBackoff(backoffCap) + } else { + conn.iceBackoff.SetMaxBackoff(backoffCap) + } + + // Mode-driven branching. ModeRelayForced skips ICE entirely; all + // other modes (P2P, P2PLazy, P2PDynamic) construct workerICE + // eagerly in Phase 1. Phase 2 will branch P2PDynamic separately + // to defer the OnNewOffer registration. 
+ skipICE := conn.config.Mode == connectionmode.ModeRelayForced + if !skipICE { relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) if err != nil { @@ -198,11 +228,25 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay, conn.metricsStages) conn.handshaker.AddRelayListener(conn.workerRelay.OnNewOffer) - if !forceRelay { + + // ICE-listener registration depends on mode: + // - ModeRelayForced: skipICE=true, no workerICE, no listener. + // - ModeP2P, ModeP2PLazy: workerICE constructed, listener registered eagerly. + // P2PLazy's whole-tunnel deferral happens at the conn_mgr level, not here. + // - ModeP2PDynamic: workerICE constructed eagerly so it's ready, but the + // listener registration is deferred. The inactivity manager calls + // Conn.AttachICE() once activity is observed on the relay tunnel. + deferICEListener := conn.config.Mode == connectionmode.ModeP2PDynamic + if !skipICE && !deferICEListener { conn.handshaker.AddICEListener(conn.workerICE.OnNewOffer) } conn.guard = guard.NewGuard(conn.Log, conn.isConnectedOnAllWay, conn.config.Timeout, conn.srWatcher) + // Phase 3.5 (#5989): reset ICE backoff + recreate workerICE on network change. + // Set before Start() is called so the goroutine sees it without races. 
+ if !skipICE { + conn.guard.SetOnNetworkChange(conn.onNetworkChange) + } conn.wg.Add(1) go func() { @@ -740,7 +784,7 @@ func (conn *Conn) isConnectedOnAllWay() (status guard.ConnStatus) { } return evalConnStatus(connStatusInputs{ - forceRelay: IsForceRelayed(), + forceRelay: conn.config.Mode == connectionmode.ModeRelayForced, peerUsesRelay: conn.workerRelay.IsRelayConnectionSupportedWithPeer(), relayConnected: conn.statusRelay.Get() == worker.StatusConnected, remoteSupportsICE: conn.handshaker.RemoteICESupported(), @@ -975,3 +1019,251 @@ func boolToConnStatus(connected bool) guard.ConnStatus { } return guard.ConnStatusDisconnected } + +// AttachICE registers the ICE-offer listener on the handshaker after the +// activity-detector observes traffic on the relay tunnel. Idempotent: if +// the listener is already attached, it is a no-op. Triggers a fresh offer +// so the remote side learns we are now ICE-capable. +// +// Used by p2p-dynamic mode: workerICE is created in Open() but the +// handshaker dispatch is deferred until traffic activity is seen. +func (conn *Conn) AttachICE() error { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.iceBackoff != nil && conn.iceBackoff.IsSuspended() { + snap := conn.iceBackoff.Snapshot() + conn.Log.Debugf("ICE backoff active (failure #%d, retry at %s), staying on relay", + snap.Failures, + snap.NextRetry.Format("15:04:05")) + return nil + } + if conn.handshaker == nil { + return fmt.Errorf("AttachICE: handshaker not initialized (Open not called)") + } + if conn.workerICE == nil { + return fmt.Errorf("AttachICE: workerICE is nil (relay-forced mode)") + } + + if !conn.attachICEListenerLocked() { + return nil + } + + if err := conn.handshaker.SendOffer(); err != nil { + conn.Log.Warnf("AttachICE: SendOffer failed: %v", err) + } + return nil +} + +// attachICEListenerLocked attaches the ICE listener to the handshaker if it +// is not already attached. 
Returns true when a new attachment was made, +// false when the call was a no-op (already attached, ICE backoff suspended, +// handshaker not initialised, or workerICE not present). +// +// Caller MUST hold conn.mu. Used by: +// - AttachICE (signal-trigger path), which then issues SendOffer. +// - onNetworkChange (Phase 3.7e, #5989), which deliberately does NOT call +// SendOffer because the Guard reconnect-loop handles that. +// +// Honours iceBackoff.IsSuspended() so the failure-backoff is not bypassed. +func (conn *Conn) attachICEListenerLocked() bool { + if conn.iceBackoff != nil && conn.iceBackoff.IsSuspended() { + snap := conn.iceBackoff.Snapshot() + conn.Log.Debugf("ICE backoff active (failure #%d, retry at %s), staying on relay", + snap.Failures, + snap.NextRetry.Format("15:04:05")) + return false + } + if conn.handshaker == nil || conn.workerICE == nil { + return false + } + if conn.handshaker.readICEListener() != nil { + return false + } + + conn.handshaker.AddICEListener(conn.workerICE.OnNewOffer) + conn.Log.Debugf("ICE listener attached (locked path)") + return true +} + +// DetachICE removes the ICE-offer listener and tears down the ICE worker. +// Idempotent: if no listener is attached, it is a no-op. Used by +// p2p-dynamic mode when the inactivity manager fires the iceTimeout but +// the relay tunnel should stay up. +func (conn *Conn) DetachICE() error { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.handshaker == nil { + return nil + } + if conn.handshaker.readICEListener() == nil { + return nil + } + + conn.handshaker.RemoveICEListener() + if conn.workerICE != nil { + conn.workerICE.Close() + } + conn.Log.Debugf("ICE listener detached (p2p-dynamic teardown)") + return nil +} + +// onICEFailed is invoked when pion's ICE agent reports +// ConnectionStateFailed. Increments the backoff counter and tears +// down the ICE worker. Phase 3 of #5989. 
+func (conn *Conn) onICEFailed() { + if conn.iceBackoff == nil { + return + } + delay := conn.iceBackoff.markFailure() + snap := conn.iceBackoff.Snapshot() + if delay > 0 { + conn.Log.Infof("ICE failure #%d, suspending for %s, next retry at %s", + snap.Failures, + delay.Round(time.Second), + snap.NextRetry.Format("15:04:05")) + } + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, snap) + } + // Tear down ICE. Idempotent. Conn stays on relay. + if err := conn.DetachICE(); err != nil { + conn.Log.Warnf("DetachICE after onICEFailed: %v", err) + } +} + +// onICEConnected is invoked when pion's ICE agent reports +// ConnectionStateConnected. Resets the backoff. Phase 3 of #5989. +func (conn *Conn) onICEConnected() { + if conn.iceBackoff == nil { + return + } + if conn.iceBackoff.Snapshot().Failures > 0 { + conn.Log.Infof("ICE success, resetting backoff (was %d failures)", + conn.iceBackoff.Snapshot().Failures) + } + conn.iceBackoff.markSuccess() + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } +} + +// SetIceBackoffMax updates the per-peer backoff cap. Called by ConnMgr +// when the server pushes a new p2p_retry_max_seconds value. If the +// iceBackoff is not yet initialized (Conn not opened yet), the value +// is stored in config so Open() picks it up. Phase 3 of #5989. +func (conn *Conn) SetIceBackoffMax(d time.Duration) { + conn.mu.Lock() + defer conn.mu.Unlock() + conn.config.P2pRetryMaxSeconds = uint32(d / time.Second) + if conn.iceBackoff != nil { + conn.iceBackoff.SetMaxBackoff(d) + } +} + +// IceBackoffSnapshot exposes the read-only backoff state for the +// status output (Task E1). Returns zero-value snapshot if no backoff +// is active. Phase 3 of #5989. 
+func (conn *Conn) IceBackoffSnapshot() BackoffSnapshot { + conn.mu.Lock() + defer conn.mu.Unlock() + if conn.iceBackoff == nil { + return BackoffSnapshot{} + } + return conn.iceBackoff.Snapshot() +} + +// onNetworkChange is invoked by Guard when the signal/relay layer +// reconnects after a network change (LTE-modem replug, WiFi roaming, etc.). +// Phase 3.5 of #5989. +// +// Resets the per-peer ICE-failure backoff (because the NAT topology may +// have changed -- previous failures do not predict future ones) AND +// recreates the workerICE wrapper so the next AttachICE/offer has a +// fresh pion-agent rather than one closed by a previous DetachICE call. +// +// Called from Guard's goroutine; acquires conn.mu, so it must not be +// invoked from a path that already holds conn.mu. +func (conn *Conn) onNetworkChange() { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.ctx.Err() != nil { + return + } + + if conn.iceBackoff != nil { + snap := conn.iceBackoff.Snapshot() + if snap.Failures > 0 { + conn.Log.Infof("network change detected, resetting ICE backoff (was %d failures)", + snap.Failures) + } + conn.iceBackoff.Reset() + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } + } + + // We deliberately do NOT replace the workerICE wrapper here. Replacing + // it leaks underlying socket/iface bindings between the old and new + // instance, which empirically causes ICE to fail with a 13s pair-check + // timeout instead of converging in <1s like a fresh daemon-start does. + // + // We also deliberately do NOT call handshaker.SendOffer() here even + // though that was an earlier attempt. The Guard's reconnect-loop + // already issues sendOffer via its newReconnectTicker (800ms initial, + // up to ~4 retries in the first ~6s) right after the same srReconnect + // event that fires this callback. 
Adding our own SendOffer just creates + // a sending-offer storm: 5 offers per peer in 6 seconds, which on the + // remote side triggers repeated tear-down + reCreateAgent cycles in + // quick succession (each new sessionID forces it). That prevents ICE + // from ever completing its pair-checks. + // + // All we do here: close the current pion agent (sets w.agent = nil). + // The Guard's natural reconnect-loop then drives the next sendOffer, + // the remote responds with a fresh offer, and our existing OnNewOffer + // path (still attached to the unchanged workerICE wrapper) goes + // through the well-tested "agent==nil + new offer -> reCreateAgent" + // branch in worker_ice.go. + // + // Phase 3.7g (#5989): only tear down the workerICE agent when ICE is + // actually broken. If pion's lastKnownState is still Connected the + // peer-to-peer UDP path is alive end-to-end (typical for a brief + // signal-server outage where WG keepalives between peers continued + // to flow); closing the agent here would force a 15-25 s ICE + // renegotiation cycle plus a Relay→ICE handover gap that the user + // would observe as a ping dropout for no good reason. + // + // If ICE actually went Disconnected/Failed during the network event, + // pion has already cleared w.agent via onConnectionStateChange and + // the Close call below is a no-op anyway. Either way, a fresh remote + // OFFER will recreate the agent through the existing OnNewOffer path. + // + // In ModeRelayForced workerICE is nil; nothing to close. + if conn.workerICE != nil && !conn.workerICE.IsConnected() { + conn.workerICE.Close() + } else if conn.workerICE != nil { + conn.Log.Debugf("network change: skipping workerICE.Close (ICE still Connected, soft-fallback)") + } + + // Phase 3.7e (#5989): force the ICE listener back on after a network + // change. 
Empirically, after an LTE-modem replug the iceListener can + // end up detached for some peers (paths via onICEFailed → DetachICE + // after a Failed transition that we did not log because of timing, + // or via concurrent state changes during the bounce). Re-attaching + // on every signal in ConnMgr.ActivatePeer (Phase 3.7d) is necessary + // but not sufficient: by the time the next signal arrives, several + // remote OFFERs and the Guard's first sendOffer may already have + // been silently dropped at handshaker.Listen() because no listener + // was present. Re-attaching here closes that window deterministically. + // + // We do NOT call SendOffer from this path. The Guard's natural + // reconnect-ticker (newReconnectTicker, 800 ms initial) issues the + // next offer right after the same srReconnect event that drove this + // callback; sending an extra one creates the offer-storm that + // Phase 3.7b removed. + conn.attachICEListenerLocked() + + conn.Log.Debugf("ICE state reset on network change (agent closed; listener re-armed; Guard will resend offer)") +} diff --git a/client/internal/peer/conn_test.go b/client/internal/peer/conn_test.go index 59216b647e9..58b8432bdd2 100644 --- a/client/internal/peer/conn_test.go +++ b/client/internal/peer/conn_test.go @@ -4,9 +4,11 @@ import ( "context" "fmt" "os" + "strings" "testing" "time" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/netbirdio/netbird/client/iface" @@ -281,6 +283,137 @@ func TestConn_presharedKey(t *testing.T) { } } +// TestConn_AttachICE_NilHandshaker verifies AttachICE errors when called +// before Open() has wired up the handshaker. 
+func TestConn_AttachICE_NilHandshaker(t *testing.T) { + c := &Conn{Log: log.WithField("peer", "test")} + if err := c.AttachICE(); err == nil { + t.Fatal("AttachICE on Conn with nil handshaker should return error") + } +} + +// TestConn_AttachICE_NilWorkerICE verifies AttachICE errors when the conn +// is in relay-forced mode (workerICE was never created). +func TestConn_AttachICE_NilWorkerICE(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + } + if err := c.AttachICE(); err == nil { + t.Fatal("AttachICE with nil workerICE should return error (relay-forced mode)") + } +} + +// TestConn_DetachICE_NoHandshaker is a no-op idempotency check: calling +// DetachICE before Open() must not panic and must not error. +func TestConn_DetachICE_NoHandshaker(t *testing.T) { + c := &Conn{Log: log.WithField("peer", "test")} + if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE with nil handshaker should be no-op, got error: %v", err) + } +} + +// TestConn_DetachICE_ClearsListener verifies DetachICE removes the ICE +// listener from the handshaker. workerICE is left nil so Close() is skipped. +func TestConn_DetachICE_ClearsListener(t *testing.T) { + h := &Handshaker{} + h.AddICEListener(func(o *OfferAnswer) {}) + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: h, + } + + if h.readICEListener() == nil { + t.Fatal("precondition: handshaker should have a listener") + } + + if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE returned error: %v", err) + } + + if h.readICEListener() != nil { + t.Fatal("DetachICE should clear the ICE listener") + } + + // Idempotent: second call is a no-op. 
+ if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE second call should be no-op, got: %v", err) + } +} + +func TestConn_AttachICE_NoOpWhenSuspended(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() // suspend it + + // AttachICE should return nil but not actually attach + err := c.AttachICE() + if err != nil { + t.Fatalf("expected nil error during backoff, got %v", err) + } + if c.handshaker.readICEListener() != nil { + t.Fatal("AttachICE during backoff must NOT register a listener") + } +} + +func TestConn_AttachICE_AfterBackoffExpiry(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() + // Force nextRetry into the past + c.iceBackoff.mu.Lock() + c.iceBackoff.nextRetry = time.Now().Add(-1 * time.Second) + c.iceBackoff.mu.Unlock() + + // Without workerICE, AttachICE returns the "nil workerICE" error + // -- but we only care that the backoff gate is NOT engaged anymore. 
+ err := c.AttachICE() + if err == nil { + t.Fatal("expected the relay-forced error path (nil workerICE)") + } + // The error should be about workerICE, not "suspended": + if errMsg := err.Error(); !strings.Contains(errMsg, "workerICE") { + t.Fatalf("after backoff expiry, error should be about workerICE not suspend; got %q", errMsg) + } +} + +func TestConn_OnICEFailed_MarksBackoffFailure(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + iceBackoff: newIceBackoff(15 * time.Minute), + } + if c.iceBackoff.IsSuspended() { + t.Fatal("precondition: not suspended") + } + c.onICEFailed() + if !c.iceBackoff.IsSuspended() { + t.Fatal("after onICEFailed, must be suspended") + } + if c.iceBackoff.Snapshot().Failures != 1 { + t.Fatalf("failures must be 1, got %d", c.iceBackoff.Snapshot().Failures) + } +} + +func TestConn_OnICEConnected_ResetsBackoff(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() + c.iceBackoff.markFailure() + c.onICEConnected() + snap := c.iceBackoff.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after onICEConnected: %+v", snap) + } +} + func TestConn_presharedKey_RosenpassManaged(t *testing.T) { conn := Conn{ config: ConnConfig{ diff --git a/client/internal/peer/env.go b/client/internal/peer/env.go index ed6a3af5391..fbee8f6808b 100644 --- a/client/internal/peer/env.go +++ b/client/internal/peer/env.go @@ -3,14 +3,32 @@ package peer import ( "os" "runtime" + "strconv" "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/shared/connectionmode" ) const ( + EnvKeyNBConnectionMode = "NB_CONNECTION_MODE" EnvKeyNBForceRelay = "NB_FORCE_RELAY" EnvKeyNBHomeRelayServers = "NB_HOME_RELAY_SERVERS" + + envEnableLazyConn = "NB_ENABLE_EXPERIMENTAL_LAZY_CONN" + envInactivityThreshold = "NB_LAZY_CONN_INACTIVITY_THRESHOLD" ) +var deprecationOnce sync.Map // env-var name -> *sync.Once + +// 
IsForceRelayed reports whether legacy NB_FORCE_RELAY is set, plus the +// runtime-special-case js (always relayed because of browser limitations). +// +// Deprecated: prefer ResolveModeFromEnv. Kept for callers that haven't +// migrated yet (Phase 1 backwards compat). func IsForceRelayed() bool { if runtime.GOOS == "js" { return true @@ -18,6 +36,65 @@ func IsForceRelayed() bool { return strings.EqualFold(os.Getenv(EnvKeyNBForceRelay), "true") } +// ResolveModeFromEnv reads all three legacy env vars plus the new +// NB_CONNECTION_MODE, applies the documented precedence and returns +// the resolved Mode and relay-timeout (in seconds, 0 if unset). +// +// Precedence: +// 1. NB_CONNECTION_MODE if parseable -> wins +// 2. NB_FORCE_RELAY=true -> ModeRelayForced (most-restrictive) +// 3. NB_ENABLE_EXPERIMENTAL_LAZY_CONN=true -> ModeP2PLazy +// 4. otherwise -> ModeUnspecified (caller falls through) +// +// NB_LAZY_CONN_INACTIVITY_THRESHOLD is parsed independently as the +// relay-timeout (alias) and emits a deprecation-warning if used. 
+func ResolveModeFromEnv() (connectionmode.Mode, uint32) { + mode := connectionmode.ModeUnspecified + + if raw := os.Getenv(EnvKeyNBConnectionMode); raw != "" { + parsed, err := connectionmode.ParseString(raw) + if err != nil { + log.Warnf("ignoring %s=%q: %v", EnvKeyNBConnectionMode, raw, err) + } else if parsed != connectionmode.ModeUnspecified { + mode = parsed + } + } + + if mode == connectionmode.ModeUnspecified { + if strings.EqualFold(os.Getenv(EnvKeyNBForceRelay), "true") { + warnDeprecated(EnvKeyNBForceRelay, EnvKeyNBConnectionMode+"=relay-forced") + mode = connectionmode.ModeRelayForced + } else if isLazyEnvTrue() { + warnDeprecated(envEnableLazyConn, EnvKeyNBConnectionMode+"=p2p-lazy") + mode = connectionmode.ModeP2PLazy + } + } + + timeoutSecs := uint32(0) + if raw := os.Getenv(envInactivityThreshold); raw != "" { + if d, err := time.ParseDuration(raw); err == nil { + timeoutSecs = uint32(d.Seconds()) + warnDeprecated(envInactivityThreshold, "the relay_timeout setting on the management server") + } else { + log.Warnf("ignoring %s=%q: %v", envInactivityThreshold, raw, err) + } + } + + return mode, timeoutSecs +} + +func isLazyEnvTrue() bool { + v, err := strconv.ParseBool(os.Getenv(envEnableLazyConn)) + return err == nil && v +} + +func warnDeprecated(envName, replacement string) { + once, _ := deprecationOnce.LoadOrStore(envName, &sync.Once{}) + once.(*sync.Once).Do(func() { + log.Warnf("env var %s is deprecated; use %s instead. The legacy var still works in this release but may be removed in a future major version.", envName, replacement) + }) +} + // OverrideRelayURLs returns the relay server URL list set in // NB_HOME_RELAY_SERVERS (comma-separated) and a boolean indicating whether // the override is active. 
When the env var is unset, the boolean is false diff --git a/client/internal/peer/env_test.go b/client/internal/peer/env_test.go new file mode 100644 index 00000000000..b70939243c6 --- /dev/null +++ b/client/internal/peer/env_test.go @@ -0,0 +1,58 @@ +package peer + +import ( + "testing" + + "github.com/netbirdio/netbird/shared/connectionmode" +) + +func TestResolveModeFromEnv(t *testing.T) { + cases := []struct { + name string + envConnMode string + envForceRelay string + envEnableLazy string + envInactivity string + wantMode connectionmode.Mode + wantTimeoutSecs uint32 + }{ + {"all unset", "", "", "", "", connectionmode.ModeUnspecified, 0}, + {"connection_mode wins", "p2p-dynamic", "true", "true", "10s", connectionmode.ModeP2PDynamic, 10}, + {"force_relay alone", "", "true", "", "", connectionmode.ModeRelayForced, 0}, + {"lazy alone", "", "", "true", "", connectionmode.ModeP2PLazy, 0}, + {"force_relay AND lazy: force_relay wins", "", "true", "true", "", connectionmode.ModeRelayForced, 0}, + {"only inactivity threshold", "", "", "", "30m", connectionmode.ModeUnspecified, 1800}, + {"connection_mode unparsable falls through to legacy", "garbage", "true", "", "", connectionmode.ModeRelayForced, 0}, + {"connection_mode parses p2p-lazy", "p2p-lazy", "", "", "", connectionmode.ModeP2PLazy, 0}, + {"force-relay value is true (case-insensitive)", "", "TRUE", "", "", connectionmode.ModeRelayForced, 0}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + t.Setenv(EnvKeyNBConnectionMode, c.envConnMode) + t.Setenv(EnvKeyNBForceRelay, c.envForceRelay) + t.Setenv("NB_ENABLE_EXPERIMENTAL_LAZY_CONN", c.envEnableLazy) + t.Setenv("NB_LAZY_CONN_INACTIVITY_THRESHOLD", c.envInactivity) + + gotMode, gotTimeout := ResolveModeFromEnv() + if gotMode != c.wantMode { + t.Errorf("mode = %v, want %v", gotMode, c.wantMode) + } + if gotTimeout != c.wantTimeoutSecs { + t.Errorf("timeout = %v, want %v", gotTimeout, c.wantTimeoutSecs) + } + }) + } +} + +func 
TestIsForceRelayedBackwardsCompat(t *testing.T) { + // IsForceRelayed must remain functional for existing callers + // during the migration window (env.go still exposes it). + t.Setenv(EnvKeyNBForceRelay, "true") + if !IsForceRelayed() { + t.Error("IsForceRelayed() should return true when NB_FORCE_RELAY=true") + } + t.Setenv(EnvKeyNBForceRelay, "false") + if IsForceRelayed() { + t.Error("IsForceRelayed() should return false when NB_FORCE_RELAY=false") + } +} diff --git a/client/internal/peer/guard/guard.go b/client/internal/peer/guard/guard.go index 2e5efbcc5a3..0f7f70e899c 100644 --- a/client/internal/peer/guard/guard.go +++ b/client/internal/peer/guard/guard.go @@ -37,6 +37,10 @@ type Guard struct { srWatcher *SRWatcher relayedConnDisconnected chan struct{} iCEConnDisconnected chan struct{} + // onNetworkChange is called when signal/relay reconnects after a + // network change (e.g. LTE-modem replug, WiFi roaming). Set once + // before Start() is called; no lock needed. Phase 3.5 of #5989. + onNetworkChange func() } func NewGuard(log *log.Entry, isConnectedFn connStatusFunc, timeout time.Duration, srWatcher *SRWatcher) *Guard { @@ -50,6 +54,13 @@ func NewGuard(log *log.Entry, isConnectedFn connStatusFunc, timeout time.Duratio } } +// SetOnNetworkChange registers a callback that fires whenever the +// signal/relay layer reconnects after a network change. Must be called +// before Start(). Phase 3.5 of #5989. 
+func (g *Guard) SetOnNetworkChange(cb func()) { + g.onNetworkChange = cb +} + func (g *Guard) Start(ctx context.Context, eventCallback func()) { g.log.Infof("starting guard for reconnection with MaxInterval: %s", g.timeout) g.reconnectLoopWithRetry(ctx, eventCallback) @@ -130,6 +141,10 @@ func (g *Guard) reconnectLoopWithRetry(ctx context.Context, callback func()) { ticker = g.newReconnectTicker(ctx) tickerChannel = ticker.C iceState.reset() + // Phase 3.5 (#5989): notify Conn to reset iceBackoff + recreate workerICE + if g.onNetworkChange != nil { + g.onNetworkChange() + } case <-ctx.Done(): g.log.Debugf("context is done, stop reconnect loop") diff --git a/client/internal/peer/handshaker.go b/client/internal/peer/handshaker.go index 1d44096b640..b4c787e9fce 100644 --- a/client/internal/peer/handshaker.go +++ b/client/internal/peer/handshaker.go @@ -104,9 +104,30 @@ func (h *Handshaker) AddRelayListener(offer func(remoteOfferAnswer *OfferAnswer) } func (h *Handshaker) AddICEListener(offer func(remoteOfferAnswer *OfferAnswer)) { + h.mu.Lock() + defer h.mu.Unlock() h.iceListener = offer } +// RemoveICEListener clears the ICE-offer listener so subsequent remote +// offers no longer dispatch to workerICE. Idempotent; calling it when +// no listener was set is a no-op. Used by Conn.DetachICE in p2p-dynamic +// mode to deactivate ICE without tearing down the relay path. +func (h *Handshaker) RemoveICEListener() { + h.mu.Lock() + defer h.mu.Unlock() + h.iceListener = nil +} + +// readICEListener returns the current ICE listener under mutex protection. +// Used by Listen() so a concurrent RemoveICEListener cannot race with the +// dispatch loop. 
+func (h *Handshaker) readICEListener() func(*OfferAnswer) { + h.mu.Lock() + defer h.mu.Unlock() + return h.iceListener +} + func (h *Handshaker) Listen(ctx context.Context) { for { select { @@ -124,8 +145,11 @@ func (h *Handshaker) Listen(ctx context.Context) { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil && h.RemoteICESupported() { - h.iceListener(&remoteOfferAnswer) + if iceListener := h.readICEListener(); iceListener != nil && h.RemoteICESupported() { + iceListener(&remoteOfferAnswer) + } else if h.RemoteICESupported() { + h.log.Debugf("remote OFFER (session %s) without local ICE listener (relay-forced mode or peer in ICE backoff)", + remoteOfferAnswer.SessionIDString()) } if err := h.sendAnswer(); err != nil { @@ -146,8 +170,11 @@ func (h *Handshaker) Listen(ctx context.Context) { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil && h.RemoteICESupported() { - h.iceListener(&remoteOfferAnswer) + if iceListener := h.readICEListener(); iceListener != nil && h.RemoteICESupported() { + iceListener(&remoteOfferAnswer) + } else if h.RemoteICESupported() { + h.log.Debugf("remote ANSWER (session %s) without local ICE listener (relay-forced mode or peer in ICE backoff)", + remoteOfferAnswer.SessionIDString()) } case <-ctx.Done(): h.log.Infof("stop listening for remote offers and answers") diff --git a/client/internal/peer/handshaker_test.go b/client/internal/peer/handshaker_test.go new file mode 100644 index 00000000000..fdc95411eb8 --- /dev/null +++ b/client/internal/peer/handshaker_test.go @@ -0,0 +1,50 @@ +package peer + +import ( + "testing" +) + +func TestHandshaker_AddRemoveICEListener(t *testing.T) { + h := &Handshaker{} + listener := func(o *OfferAnswer) {} + + h.AddICEListener(listener) + if h.iceListener == nil { + t.Fatal("iceListener should be set after AddICEListener") + } + + h.RemoveICEListener() + if h.iceListener != nil { + t.Fatal("iceListener should be nil after RemoveICEListener") + } + + // Idempotency: 
removing again is a no-op. + h.RemoveICEListener() + if h.iceListener != nil { + t.Fatal("RemoveICEListener should be idempotent") + } + + // Re-add works. + h.AddICEListener(listener) + if h.iceListener == nil { + t.Fatal("re-adding the listener should work") + } +} + +func TestHandshaker_readICEListener(t *testing.T) { + h := &Handshaker{} + if got := h.readICEListener(); got != nil { + t.Fatal("readICEListener on empty Handshaker should return nil") + } + + listener := func(o *OfferAnswer) {} + h.AddICEListener(listener) + if got := h.readICEListener(); got == nil { + t.Fatal("readICEListener after AddICEListener should return non-nil") + } + + h.RemoveICEListener() + if got := h.readICEListener(); got != nil { + t.Fatal("readICEListener after RemoveICEListener should return nil") + } +} diff --git a/client/internal/peer/ice_backoff.go b/client/internal/peer/ice_backoff.go new file mode 100644 index 00000000000..4a600182ef4 --- /dev/null +++ b/client/internal/peer/ice_backoff.go @@ -0,0 +1,169 @@ +package peer + +import ( + "sync" + "time" + + "github.com/cenkalti/backoff/v4" +) + +const ( + // DefaultP2PRetryMax is the built-in fallback when the management + // server has not pushed a p2p_retry_max_seconds value (Proto wire + // value 0 = "not set"). Phase 3 of #5989. + DefaultP2PRetryMax = 15 * time.Minute + + iceBackoffInitialInterval = 1 * time.Minute + iceBackoffMultiplier = 2.0 + iceBackoffRandomizationFactor = 0.1 + + // networkChangeGracePeriod is the window after Reset() (signal/relay + // reconnect, network-change event) during which markFailure caps the + // suspend delay at networkChangeRetryDelay. Phase 3.7f of #5989. + // + // Rationale: the first ICE pair-check after a network change often + // fails on stale NAT mappings, even when subsequent attempts succeed. + // Falling back to the normal 1-minute initial backoff after that + // single failure leaves the peer on relay for far longer than the + // underlying connectivity actually warrants. 
A short fixed delay + // inside the grace window lets follow-up attempts run while the new + // LTE/Wi-Fi mapping is still fresh; outside the window the normal + // exponential schedule applies as before. + // + // Phase 3.7h widened the window from 30 s to 60 s and reduced the + // retry delay from 5 s to 2 s after observing real-world LTE-bounce + // behaviour: cold NAT mappings often need 3-4 ICE attempts to prime, + // and the previous 30 s window only fit ~2 attempts (each pair-check + // is ~12-15 s) before the schedule jumped to a 1-minute exponential + // suspend. The wider window plus shorter delay typically fits ~4-5 + // attempts and recovers within ~50 s for peers behind a single NAT + // instead of 2-3 minutes. + networkChangeGracePeriod = 60 * time.Second + networkChangeRetryDelay = 2 * time.Second +) + +// iceBackoffState tracks per-peer ICE-failure backoff in p2p-dynamic +// mode. Phase 3 of #5989. +type iceBackoffState struct { + mu sync.Mutex + bo *backoff.ExponentialBackOff + failures int + nextRetry time.Time + suspended bool + maxBackoff time.Duration + lastResetAt time.Time +} + +// BackoffSnapshot is a read-only view used by the status output. 
+type BackoffSnapshot struct { + Failures int + NextRetry time.Time + Suspended bool +} + +func newIceBackoff(maxBackoff time.Duration) *iceBackoffState { + return &iceBackoffState{ + bo: buildBackoff(maxBackoff), + maxBackoff: maxBackoff, + } +} + +func buildBackoff(maxBackoff time.Duration) *backoff.ExponentialBackOff { + bo := backoff.NewExponentialBackOff() + bo.InitialInterval = iceBackoffInitialInterval + bo.Multiplier = iceBackoffMultiplier + bo.RandomizationFactor = iceBackoffRandomizationFactor + bo.MaxInterval = maxBackoff + bo.MaxElapsedTime = 0 + bo.Reset() + return bo +} + +func (s *iceBackoffState) IsSuspended() bool { + s.mu.Lock() + defer s.mu.Unlock() + if !s.suspended { + return false + } + if time.Now().After(s.nextRetry) { + return false + } + return true +} + +// markFailure increments the failure counter and computes the next retry +// time. Returns the delay so callers can log it. If maxBackoff is 0 +// (= disabled), returns 0 and does not modify state. +// +// Phase 3.7f of #5989: while we are still inside networkChangeGracePeriod +// after the most recent Reset() (typically a srReconnect / network-change +// event), the suspend delay is capped at networkChangeRetryDelay and the +// long-term exponential schedule is NOT advanced. Once the grace window +// elapses, normal exponential backoff applies. This lets the second ICE +// pair-check run while a fresh LTE/Wi-Fi NAT mapping is still warm, +// without flooding signaling for chronically broken peers. 
+func (s *iceBackoffState) markFailure() time.Duration { + s.mu.Lock() + defer s.mu.Unlock() + if s.maxBackoff == 0 { + return 0 + } + s.failures++ + + var delay time.Duration + if !s.lastResetAt.IsZero() && time.Since(s.lastResetAt) < networkChangeGracePeriod { + delay = networkChangeRetryDelay + } else { + delay = s.bo.NextBackOff() + } + + s.nextRetry = time.Now().Add(delay) + s.suspended = true + return delay +} + +func (s *iceBackoffState) Snapshot() BackoffSnapshot { + s.mu.Lock() + defer s.mu.Unlock() + return BackoffSnapshot{ + Failures: s.failures, + NextRetry: s.nextRetry, + Suspended: s.suspended && time.Now().Before(s.nextRetry), + } +} + +// markSuccess clears the failure counter and resets the internal backoff +// to its initial interval. Called when pion reports ConnectionStateConnected. +func (s *iceBackoffState) markSuccess() { + s.mu.Lock() + defer s.mu.Unlock() + s.failures = 0 + s.suspended = false + s.bo.Reset() +} + +// Reset is the hard reset triggered by interface-change or mode-push. +// In addition to clearing the failure counter and exponential schedule, +// it stamps lastResetAt so that markFailure can apply the +// post-network-change grace period (Phase 3.7f). +func (s *iceBackoffState) Reset() { + s.mu.Lock() + defer s.mu.Unlock() + s.failures = 0 + s.suspended = false + s.bo.Reset() + s.lastResetAt = time.Now() +} + +// SetMaxBackoff updates the cap. Called from ConnMgr.UpdatedRemotePeerConfig +// when the server pushes a new value. Rebuilds the internal backoff with +// the new schedule but preserves the failure counter. 
+func (s *iceBackoffState) SetMaxBackoff(d time.Duration) { + s.mu.Lock() + defer s.mu.Unlock() + if d == s.maxBackoff { + return + } + s.maxBackoff = d + s.bo = buildBackoff(d) +} diff --git a/client/internal/peer/ice_backoff_test.go b/client/internal/peer/ice_backoff_test.go new file mode 100644 index 00000000000..85fd3a5a2e0 --- /dev/null +++ b/client/internal/peer/ice_backoff_test.go @@ -0,0 +1,182 @@ +package peer + +import ( + "testing" + "time" +) + +func TestIceBackoff_InitialState(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + if s.IsSuspended() { + t.Fatal("fresh state must not be suspended") + } + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("fresh state snapshot wrong: %+v", snap) + } +} + +func TestIceBackoff_SetMaxBackoff_Live(t *testing.T) { + s := newIceBackoff(1 * time.Minute) // tight cap + s.markFailure() // expect ~1m + s.markFailure() // expect ~1m (capped) + d2 := s.markFailure() // still ~1m + if d2 > 90*time.Second { + t.Errorf("with 1m cap, third failure should be ~1m, got %v", d2) + } + // Live update to 1h cap + s.SetMaxBackoff(60 * time.Minute) + // Subsequent failure produces a non-zero delay (jitter-dependent + // but should be > 0 since backoff was rebuilt). 
+ d3 := s.markFailure() + if d3 <= 0 { + t.Errorf("after SetMaxBackoff: must produce non-zero delay, got %v", d3) + } +} + +func TestIceBackoff_SuccessReset(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + for i := 0; i < 5; i++ { + s.markFailure() + } + s.markSuccess() + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after markSuccess: %+v", snap) + } + // Next failure must be back to step-1 magnitude (~1m) + delay := s.markFailure() + if delay > 70*time.Second { + t.Errorf("after success-reset, first failure must restart at ~1m, got %v", delay) + } +} + +func TestIceBackoff_HardReset(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.markFailure() + s.markFailure() + s.Reset() + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after Reset: %+v", snap) + } +} + +func TestIceBackoff_SuspendedExpires(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.markFailure() + // Force nextRetry to past + s.mu.Lock() + s.nextRetry = time.Now().Add(-1 * time.Second) + s.mu.Unlock() + if s.IsSuspended() { + t.Fatal("expired suspend must report not suspended") + } +} + +func TestIceBackoff_ExponentialDoubling(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + expectedRanges := []struct { + min, max time.Duration + }{ + {50 * time.Second, 70 * time.Second}, // ~1m + {100 * time.Second, 140 * time.Second}, // ~2m + {210 * time.Second, 270 * time.Second}, // ~4m + {420 * time.Second, 540 * time.Second}, // ~8m + {810 * time.Second, 990 * time.Second}, // ~15m capped + {810 * time.Second, 990 * time.Second}, // ~15m capped + {810 * time.Second, 990 * time.Second}, // ~15m capped + } + for i, exp := range expectedRanges { + delay := s.markFailure() + if delay < exp.min || delay > exp.max { + t.Errorf("failure #%d: delay %v outside expected range [%v, %v]", + i+1, delay, exp.min, exp.max) + } + } +} + +func TestIceBackoff_MaxBackoffOverride(t *testing.T) { + s := newIceBackoff(5 * time.Minute) // 
300s cap + delays := []time.Duration{} + for i := 0; i < 5; i++ { + delays = append(delays, s.markFailure()) + } + // Last few should be capped at ~5m (300s) regardless of multiplier + for i := 2; i < 5; i++ { + if delays[i] > 6*time.Minute { + t.Errorf("failure #%d: delay %v exceeds 5m cap", i+1, delays[i]) + } + } +} + +func TestIceBackoff_MaxBackoffZero_Disabled(t *testing.T) { + s := newIceBackoff(0) + delay := s.markFailure() + if delay != 0 { + t.Errorf("disabled backoff must return 0 delay, got %v", delay) + } + if s.IsSuspended() { + t.Fatal("disabled backoff must not suspend") + } +} + +func TestIceBackoff_GracePeriodAfterReset_ShortDelay(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.Reset() // simulate srReconnect / network-change + + delay := s.markFailure() + if delay != networkChangeRetryDelay { + t.Fatalf("within grace window: expected %v, got %v", networkChangeRetryDelay, delay) + } + + // A second failure inside the grace window also uses the short delay + // (long-term exponential schedule is NOT advanced). + delay2 := s.markFailure() + if delay2 != networkChangeRetryDelay { + t.Fatalf("second failure inside grace: expected %v, got %v", networkChangeRetryDelay, delay2) + } +} + +func TestIceBackoff_GraceExpired_NormalExponential(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.Reset() + + // Force lastResetAt into the past so the grace window has expired. + s.mu.Lock() + s.lastResetAt = time.Now().Add(-2 * networkChangeGracePeriod) + s.mu.Unlock() + + delay := s.markFailure() + if delay < 50*time.Second || delay > 70*time.Second { + t.Fatalf("outside grace: expected ~1m exponential delay, got %v", delay) + } +} + +func TestIceBackoff_NoGraceWithoutReset(t *testing.T) { + // Fresh state without an explicit Reset must use the normal exponential + // schedule (lastResetAt is zero so the grace path does not apply). 
+ s := newIceBackoff(15 * time.Minute) + delay := s.markFailure() + if delay < 50*time.Second { + t.Fatalf("fresh state without Reset: expected ~1m delay, got %v", delay) + } +} + +func TestIceBackoff_FirstFailure(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + delay := s.markFailure() + if delay <= 0 { + t.Fatalf("first failure must produce a positive delay, got %v", delay) + } + if delay < 50*time.Second || delay > 70*time.Second { + t.Fatalf("first failure delay should be ~1m (with 10%% jitter), got %v", delay) + } + if !s.IsSuspended() { + t.Fatal("after first failure must be suspended") + } + snap := s.Snapshot() + if snap.Failures != 1 || !snap.Suspended { + t.Fatalf("snapshot wrong: %+v", snap) + } +} diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index e8e61f660c9..daaddd56570 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -70,6 +70,10 @@ type State struct { RosenpassEnabled bool SSHHostKey []byte routes map[string]struct{} + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int + IceBackoffNextRetry time.Time + IceBackoffSuspended bool } // AddRoute add a single route to routes map @@ -360,6 +364,23 @@ func (d *Status) UpdatePeerState(receivedState State) error { return nil } +// UpdatePeerIceBackoff updates the ICE-backoff snapshot for a peer. +// Called by Conn.onICEFailed / onICEConnected so that the daemon +// status reflects current backoff state. Phase 3 of #5989. 
+func (d *Status) UpdatePeerIceBackoff(pubKey string, snap BackoffSnapshot) { + d.mux.Lock() + defer d.mux.Unlock() + + peerState, ok := d.peers[pubKey] + if !ok { + return + } + peerState.IceBackoffFailures = snap.Failures + peerState.IceBackoffNextRetry = snap.NextRetry + peerState.IceBackoffSuspended = snap.Suspended + d.peers[pubKey] = peerState +} + func (d *Status) AddPeerStateRoute(peer string, route string, resourceId route.ResID) error { d.mux.Lock() @@ -1348,6 +1369,9 @@ func (fs FullStatus) ToProto() *proto.FullStatus { Networks: networks, Latency: durationpb.New(peerState.Latency), SshHostKey: peerState.SSHHostKey, + IceBackoffFailures: int32(peerState.IceBackoffFailures), + IceBackoffNextRetry: timestamppb.New(peerState.IceBackoffNextRetry), + IceBackoffSuspended: peerState.IceBackoffSuspended, } pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) } diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index 29bf5aaaa74..f4c881c87cc 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -101,6 +101,20 @@ func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) { defer w.muxAgent.Unlock() if w.agent != nil || w.agentConnecting { + // Phase 3.7c (#5989) re-introduces the Guard-Loop Fix from PR #5805. + // While the local ICE agent is mid-connection, ignore any incoming + // offer regardless of sessionID. Both sides' Guards fire fresh + // offers every ~800ms-30s (driven by their own iceRetryState + + // srReconnect events). If we tear down on every sessionID-change, + // the in-flight ICE pair-checks (~5-10s) never complete -- the + // remote's freshly-recreated agent generates yet another sessionID, + // loops back, infinite recreate cycle. Empirically observed on + // badmitterndorf during LTE-carrier instability: 5 different + // sessionIDs received from the remote in 2min, no P2P convergence. 
+ if w.agentConnecting { + w.log.Debugf("agent connecting, skipping new offer (sessionID %s) to let pair-checks finish", remoteOfferAnswer.SessionIDString()) + return + } // backward compatibility with old clients that do not send session ID if remoteOfferAnswer.SessionID == nil { w.log.Debugf("agent already exists, skipping the offer") @@ -201,6 +215,21 @@ func (w *WorkerICE) InProgress() bool { return w.agentConnecting } +// IsConnected returns true when pion's ICE agent reports Connected and +// has not yet transitioned to Disconnected/Failed/Closed. Used by +// Conn.onNetworkChange (Phase 3.7g of #5989) to skip a needless +// workerICE.Close when an srReconnect/network-change event arrives but +// the existing P2P session is still alive end-to-end (typical for a +// brief signal-server outage while peer-to-peer UDP keeps flowing). +// Closing the agent in that case forces a 15-25 s renegotiation cycle +// and a Relay→ICE handover gap that the user would observe as a ping +// dropout, even though no real peer-to-peer connectivity loss occurred. +func (w *WorkerICE) IsConnected() bool { + w.muxAgent.Lock() + defer w.muxAgent.Unlock() + return w.agent != nil && w.lastKnownState == ice.ConnectionStateConnected +} + func (w *WorkerICE) Close() { w.muxAgent.Lock() defer w.muxAgent.Unlock() @@ -520,6 +549,8 @@ func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dia case ice.ConnectionStateConnected: w.lastKnownState = ice.ConnectionStateConnected w.logSuccessfulPaths(agent) + // Phase 3 of #5989: reset backoff on ICE success. + w.conn.onICEConnected() return case ice.ConnectionStateFailed, ice.ConnectionStateDisconnected, ice.ConnectionStateClosed: // ice.ConnectionStateClosed happens when we recreate the agent. 
For the P2P to TURN switch important to @@ -531,6 +562,13 @@ func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dia w.lastKnownState = ice.ConnectionStateDisconnected w.conn.onICEStateDisconnected(sessionChanged) } + + // Phase 3 of #5989: record failure in backoff only for true + // ICE failure (not for the synthetic Closed event that occurs + // when we recreate the agent on reconnect). + if state == ice.ConnectionStateFailed { + w.conn.onICEFailed() + } default: return } diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index 20c615d579d..2364392c702 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -96,6 +96,11 @@ type ConfigInput struct { LazyConnectionEnabled *bool + ConnectionMode *string + RelayTimeoutSeconds *uint32 + P2pTimeoutSeconds *uint32 + P2pRetryMaxSeconds *uint32 + MTU *uint16 } @@ -170,6 +175,13 @@ type Config struct { LazyConnectionEnabled bool + ConnectionMode string `json:",omitempty"` + RelayTimeoutSeconds uint32 `json:",omitempty"` + P2pTimeoutSeconds uint32 `json:",omitempty"` + // P2pRetryMaxSeconds caps the ICE-failure backoff schedule. 0 = use + // management-server value. Phase 3 of #5989. 
+ P2pRetryMaxSeconds uint32 `json:"p2p_retry_max_seconds,omitempty"` + MTU uint16 } @@ -593,6 +605,27 @@ func (config *Config) apply(input ConfigInput) (updated bool, err error) { updated = true } + if input.ConnectionMode != nil && *input.ConnectionMode != config.ConnectionMode { + log.Infof("switching connection mode to %s", *input.ConnectionMode) + config.ConnectionMode = *input.ConnectionMode + updated = true + } + if input.RelayTimeoutSeconds != nil && *input.RelayTimeoutSeconds != config.RelayTimeoutSeconds { + log.Infof("switching relay timeout to %d seconds", *input.RelayTimeoutSeconds) + config.RelayTimeoutSeconds = *input.RelayTimeoutSeconds + updated = true + } + if input.P2pTimeoutSeconds != nil && *input.P2pTimeoutSeconds != config.P2pTimeoutSeconds { + log.Infof("switching p2p timeout to %d seconds", *input.P2pTimeoutSeconds) + config.P2pTimeoutSeconds = *input.P2pTimeoutSeconds + updated = true + } + if input.P2pRetryMaxSeconds != nil && *input.P2pRetryMaxSeconds != config.P2pRetryMaxSeconds { + log.Infof("switching p2p retry max to %d seconds", *input.P2pRetryMaxSeconds) + config.P2pRetryMaxSeconds = *input.P2pRetryMaxSeconds + updated = true + } + if input.MTU != nil && *input.MTU != config.MTU { log.Infof("updating MTU to %d (old value %d)", *input.MTU, config.MTU) config.MTU = *input.MTU diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 11e7877f2df..16bfa30f251 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -342,8 +342,18 @@ type LoginRequest struct { EnableSSHRemotePortForwarding *bool `protobuf:"varint,37,opt,name=enableSSHRemotePortForwarding,proto3,oneof" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth *bool `protobuf:"varint,38,opt,name=disableSSHAuth,proto3,oneof" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL *int32 `protobuf:"varint,39,opt,name=sshJWTCacheTTL,proto3,oneof" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache 
protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + ConnectionMode *string `protobuf:"bytes,40,opt,name=connection_mode,json=connectionMode,proto3,oneof" json:"connection_mode,omitempty"` + P2PTimeoutSeconds *uint32 `protobuf:"varint,41,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3,oneof" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds *uint32 `protobuf:"varint,42,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3,oneof" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + P2PRetryMaxSeconds *uint32 `protobuf:"varint,43,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3,oneof" json:"p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *LoginRequest) Reset() { @@ -650,6 +660,34 @@ func (x *LoginRequest) GetSshJWTCacheTTL() int32 { return 0 } +func (x *LoginRequest) GetConnectionMode() string { + if x != nil && x.ConnectionMode != nil { + return *x.ConnectionMode + } + return "" +} + +func (x *LoginRequest) GetP2PTimeoutSeconds() uint32 { + if x != nil && x.P2PTimeoutSeconds != nil { + return *x.P2PTimeoutSeconds + } + return 0 +} + +func (x *LoginRequest) GetRelayTimeoutSeconds() uint32 { + if x != nil && x.RelayTimeoutSeconds != nil { + return *x.RelayTimeoutSeconds + } + return 0 +} + +func (x *LoginRequest) GetP2PRetryMaxSeconds() uint32 { + if x != nil && x.P2PRetryMaxSeconds != nil { + return *x.P2PRetryMaxSeconds + } + return 0 +} + type LoginResponse struct { state protoimpl.MessageState `protogen:"open.v1"` NeedsSSOLogin bool `protobuf:"varint,1,opt,name=needsSSOLogin,proto3" 
json:"needsSSOLogin,omitempty"` @@ -1182,8 +1220,28 @@ type GetConfigResponse struct { EnableSSHRemotePortForwarding bool `protobuf:"varint,23,opt,name=enableSSHRemotePortForwarding,proto3" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth bool `protobuf:"varint,25,opt,name=disableSSHAuth,proto3" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL int32 `protobuf:"varint,26,opt,name=sshJWTCacheTTL,proto3" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is the canonical lower-kebab-case name + // (relay-forced, p2p, p2p-lazy, p2p-dynamic, follow-server) or + // empty string when no local override is set. + ConnectionMode string `protobuf:"bytes,27,opt,name=connection_mode,json=connectionMode,proto3" json:"connection_mode,omitempty"` + P2PTimeoutSeconds uint32 `protobuf:"varint,28,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds uint32 `protobuf:"varint,29,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = no + // local override (daemon falls back to server-pushed value or built-in + // 15-min default). + P2PRetryMaxSeconds uint32 `protobuf:"varint,30,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3" json:"p2p_retry_max_seconds,omitempty"` + // Phase 3.7h: values most recently pushed by the management server's + // PeerConfig (independent of any local override). The UI surfaces + // these so users can see what "Follow server" would inherit and which + // numeric defaults the empty-entry overrides fall back to. All four + // are 0/empty when the engine has not received PeerConfig yet. 
+ ServerPushedConnectionMode string `protobuf:"bytes,31,opt,name=server_pushed_connection_mode,json=serverPushedConnectionMode,proto3" json:"server_pushed_connection_mode,omitempty"` + ServerPushedRelayTimeoutSeconds uint32 `protobuf:"varint,32,opt,name=server_pushed_relay_timeout_seconds,json=serverPushedRelayTimeoutSeconds,proto3" json:"server_pushed_relay_timeout_seconds,omitempty"` + ServerPushedP2PTimeoutSeconds uint32 `protobuf:"varint,33,opt,name=server_pushed_p2p_timeout_seconds,json=serverPushedP2pTimeoutSeconds,proto3" json:"server_pushed_p2p_timeout_seconds,omitempty"` + ServerPushedP2PRetryMaxSeconds uint32 `protobuf:"varint,34,opt,name=server_pushed_p2p_retry_max_seconds,json=serverPushedP2pRetryMaxSeconds,proto3" json:"server_pushed_p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetConfigResponse) Reset() { @@ -1398,6 +1456,62 @@ func (x *GetConfigResponse) GetSshJWTCacheTTL() int32 { return 0 } +func (x *GetConfigResponse) GetConnectionMode() string { + if x != nil { + return x.ConnectionMode + } + return "" +} + +func (x *GetConfigResponse) GetP2PTimeoutSeconds() uint32 { + if x != nil { + return x.P2PTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetRelayTimeoutSeconds() uint32 { + if x != nil { + return x.RelayTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.P2PRetryMaxSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetServerPushedConnectionMode() string { + if x != nil { + return x.ServerPushedConnectionMode + } + return "" +} + +func (x *GetConfigResponse) GetServerPushedRelayTimeoutSeconds() uint32 { + if x != nil { + return x.ServerPushedRelayTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetServerPushedP2PTimeoutSeconds() uint32 { + if x != nil { + return x.ServerPushedP2PTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) 
GetServerPushedP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.ServerPushedP2PRetryMaxSeconds + } + return 0 +} + // PeerState contains the latest state of a peer type PeerState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1419,8 +1533,12 @@ type PeerState struct { Latency *durationpb.Duration `protobuf:"bytes,17,opt,name=latency,proto3" json:"latency,omitempty"` RelayAddress string `protobuf:"bytes,18,opt,name=relayAddress,proto3" json:"relayAddress,omitempty"` SshHostKey []byte `protobuf:"bytes,19,opt,name=sshHostKey,proto3" json:"sshHostKey,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int32 `protobuf:"varint,20,opt,name=iceBackoffFailures,proto3" json:"iceBackoffFailures,omitempty"` + IceBackoffNextRetry *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=iceBackoffNextRetry,proto3" json:"iceBackoffNextRetry,omitempty"` + IceBackoffSuspended bool `protobuf:"varint,22,opt,name=iceBackoffSuspended,proto3" json:"iceBackoffSuspended,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PeerState) Reset() { @@ -1579,6 +1697,27 @@ func (x *PeerState) GetSshHostKey() []byte { return nil } +func (x *PeerState) GetIceBackoffFailures() int32 { + if x != nil { + return x.IceBackoffFailures + } + return 0 +} + +func (x *PeerState) GetIceBackoffNextRetry() *timestamppb.Timestamp { + if x != nil { + return x.IceBackoffNextRetry + } + return nil +} + +func (x *PeerState) GetIceBackoffSuspended() bool { + if x != nil { + return x.IceBackoffSuspended + } + return false +} + // LocalPeerState contains the latest state of the local peer type LocalPeerState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -4009,8 +4148,18 @@ type SetConfigRequest struct { EnableSSHRemotePortForwarding *bool `protobuf:"varint,32,opt,name=enableSSHRemotePortForwarding,proto3,oneof" 
json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth *bool `protobuf:"varint,33,opt,name=disableSSHAuth,proto3,oneof" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL *int32 `protobuf:"varint,34,opt,name=sshJWTCacheTTL,proto3,oneof" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + ConnectionMode *string `protobuf:"bytes,40,opt,name=connection_mode,json=connectionMode,proto3,oneof" json:"connection_mode,omitempty"` + P2PTimeoutSeconds *uint32 `protobuf:"varint,41,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3,oneof" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds *uint32 `protobuf:"varint,42,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3,oneof" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). 
+ P2PRetryMaxSeconds *uint32 `protobuf:"varint,43,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3,oneof" json:"p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SetConfigRequest) Reset() { @@ -4281,6 +4430,34 @@ func (x *SetConfigRequest) GetSshJWTCacheTTL() int32 { return 0 } +func (x *SetConfigRequest) GetConnectionMode() string { + if x != nil && x.ConnectionMode != nil { + return *x.ConnectionMode + } + return "" +} + +func (x *SetConfigRequest) GetP2PTimeoutSeconds() uint32 { + if x != nil && x.P2PTimeoutSeconds != nil { + return *x.P2PTimeoutSeconds + } + return 0 +} + +func (x *SetConfigRequest) GetRelayTimeoutSeconds() uint32 { + if x != nil && x.RelayTimeoutSeconds != nil { + return *x.RelayTimeoutSeconds + } + return 0 +} + +func (x *SetConfigRequest) GetP2PRetryMaxSeconds() uint32 { + if x != nil && x.P2PRetryMaxSeconds != nil { + return *x.P2PRetryMaxSeconds + } + return 0 +} + type SetConfigResponse struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -6186,7 +6363,7 @@ var File_daemon_proto protoreflect.FileDescriptor const file_daemon_proto_rawDesc = "" + "\n" + "\fdaemon.proto\x12\x06daemon\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"\x0e\n" + - "\fEmptyRequest\"\xb6\x12\n" + + "\fEmptyRequest\"\xea\x14\n" + "\fLoginRequest\x12\x1a\n" + "\bsetupKey\x18\x01 \x01(\tR\bsetupKey\x12&\n" + "\fpreSharedKey\x18\x02 \x01(\tB\x02\x18\x01R\fpreSharedKey\x12$\n" + @@ -6230,7 +6407,11 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18$ \x01(\bH\x17R\x1cenableSSHLocalPortForwarding\x88\x01\x01\x12I\n" + "\x1denableSSHRemotePortForwarding\x18% \x01(\bH\x18R\x1denableSSHRemotePortForwarding\x88\x01\x01\x12+\n" + "\x0edisableSSHAuth\x18& \x01(\bH\x19R\x0edisableSSHAuth\x88\x01\x01\x12+\n" + - "\x0esshJWTCacheTTL\x18' 
\x01(\x05H\x1aR\x0esshJWTCacheTTL\x88\x01\x01B\x13\n" + + "\x0esshJWTCacheTTL\x18' \x01(\x05H\x1aR\x0esshJWTCacheTTL\x88\x01\x01\x12,\n" + + "\x0fconnection_mode\x18( \x01(\tH\x1bR\x0econnectionMode\x88\x01\x01\x123\n" + + "\x13p2p_timeout_seconds\x18) \x01(\rH\x1cR\x11p2pTimeoutSeconds\x88\x01\x01\x127\n" + + "\x15relay_timeout_seconds\x18* \x01(\rH\x1dR\x13relayTimeoutSeconds\x88\x01\x01\x126\n" + + "\x15p2p_retry_max_seconds\x18+ \x01(\rH\x1eR\x12p2pRetryMaxSeconds\x88\x01\x01B\x13\n" + "\x11_rosenpassEnabledB\x10\n" + "\x0e_interfaceNameB\x10\n" + "\x0e_wireguardPortB\x17\n" + @@ -6257,7 +6438,11 @@ const file_daemon_proto_rawDesc = "" + "\x1d_enableSSHLocalPortForwardingB \n" + "\x1e_enableSSHRemotePortForwardingB\x11\n" + "\x0f_disableSSHAuthB\x11\n" + - "\x0f_sshJWTCacheTTL\"\xb5\x01\n" + + "\x0f_sshJWTCacheTTLB\x12\n" + + "\x10_connection_modeB\x16\n" + + "\x14_p2p_timeout_secondsB\x18\n" + + "\x16_relay_timeout_secondsB\x18\n" + + "\x16_p2p_retry_max_seconds\"\xb5\x01\n" + "\rLoginResponse\x12$\n" + "\rneedsSSOLogin\x18\x01 \x01(\bR\rneedsSSOLogin\x12\x1a\n" + "\buserCode\x18\x02 \x01(\tR\buserCode\x12(\n" + @@ -6290,7 +6475,7 @@ const file_daemon_proto_rawDesc = "" + "\fDownResponse\"P\n" + "\x10GetConfigRequest\x12 \n" + "\vprofileName\x18\x01 \x01(\tR\vprofileName\x12\x1a\n" + - "\busername\x18\x02 \x01(\tR\busername\"\xdb\b\n" + + "\busername\x18\x02 \x01(\tR\busername\"\xc3\f\n" + "\x11GetConfigResponse\x12$\n" + "\rmanagementUrl\x18\x01 \x01(\tR\rmanagementUrl\x12\x1e\n" + "\n" + @@ -6321,7 +6506,15 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18\x16 \x01(\bR\x1cenableSSHLocalPortForwarding\x12D\n" + "\x1denableSSHRemotePortForwarding\x18\x17 \x01(\bR\x1denableSSHRemotePortForwarding\x12&\n" + "\x0edisableSSHAuth\x18\x19 \x01(\bR\x0edisableSSHAuth\x12&\n" + - "\x0esshJWTCacheTTL\x18\x1a \x01(\x05R\x0esshJWTCacheTTL\"\xfe\x05\n" + + "\x0esshJWTCacheTTL\x18\x1a \x01(\x05R\x0esshJWTCacheTTL\x12'\n" + + 
"\x0fconnection_mode\x18\x1b \x01(\tR\x0econnectionMode\x12.\n" + + "\x13p2p_timeout_seconds\x18\x1c \x01(\rR\x11p2pTimeoutSeconds\x122\n" + + "\x15relay_timeout_seconds\x18\x1d \x01(\rR\x13relayTimeoutSeconds\x121\n" + + "\x15p2p_retry_max_seconds\x18\x1e \x01(\rR\x12p2pRetryMaxSeconds\x12A\n" + + "\x1dserver_pushed_connection_mode\x18\x1f \x01(\tR\x1aserverPushedConnectionMode\x12L\n" + + "#server_pushed_relay_timeout_seconds\x18 \x01(\rR\x1fserverPushedRelayTimeoutSeconds\x12H\n" + + "!server_pushed_p2p_timeout_seconds\x18! \x01(\rR\x1dserverPushedP2pTimeoutSeconds\x12K\n" + + "#server_pushed_p2p_retry_max_seconds\x18\" \x01(\rR\x1eserverPushedP2pRetryMaxSeconds\"\xae\a\n" + "\tPeerState\x12\x0e\n" + "\x02IP\x18\x01 \x01(\tR\x02IP\x12\x16\n" + "\x06pubKey\x18\x02 \x01(\tR\x06pubKey\x12\x1e\n" + @@ -6345,7 +6538,10 @@ const file_daemon_proto_rawDesc = "" + "\frelayAddress\x18\x12 \x01(\tR\frelayAddress\x12\x1e\n" + "\n" + "sshHostKey\x18\x13 \x01(\fR\n" + - "sshHostKey\"\xf0\x01\n" + + "sshHostKey\x12.\n" + + "\x12iceBackoffFailures\x18\x14 \x01(\x05R\x12iceBackoffFailures\x12L\n" + + "\x13iceBackoffNextRetry\x18\x15 \x01(\v2\x1a.google.protobuf.TimestampR\x13iceBackoffNextRetry\x120\n" + + "\x13iceBackoffSuspended\x18\x16 \x01(\bR\x13iceBackoffSuspended\"\xf0\x01\n" + "\x0eLocalPeerState\x12\x0e\n" + "\x02IP\x18\x01 \x01(\tR\x02IP\x12\x16\n" + "\x06pubKey\x18\x02 \x01(\tR\x06pubKey\x12(\n" + @@ -6534,7 +6730,7 @@ const file_daemon_proto_rawDesc = "" + "\busername\x18\x02 \x01(\tH\x01R\busername\x88\x01\x01B\x0e\n" + "\f_profileNameB\v\n" + "\t_username\"\x17\n" + - "\x15SwitchProfileResponse\"\xdf\x10\n" + + "\x15SwitchProfileResponse\"\x93\x13\n" + "\x10SetConfigRequest\x12\x1a\n" + "\busername\x18\x01 \x01(\tR\busername\x12 \n" + "\vprofileName\x18\x02 \x01(\tR\vprofileName\x12$\n" + @@ -6573,7 +6769,11 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18\x1f \x01(\bH\x14R\x1cenableSSHLocalPortForwarding\x88\x01\x01\x12I\n" + 
"\x1denableSSHRemotePortForwarding\x18 \x01(\bH\x15R\x1denableSSHRemotePortForwarding\x88\x01\x01\x12+\n" + "\x0edisableSSHAuth\x18! \x01(\bH\x16R\x0edisableSSHAuth\x88\x01\x01\x12+\n" + - "\x0esshJWTCacheTTL\x18\" \x01(\x05H\x17R\x0esshJWTCacheTTL\x88\x01\x01B\x13\n" + + "\x0esshJWTCacheTTL\x18\" \x01(\x05H\x17R\x0esshJWTCacheTTL\x88\x01\x01\x12,\n" + + "\x0fconnection_mode\x18( \x01(\tH\x18R\x0econnectionMode\x88\x01\x01\x123\n" + + "\x13p2p_timeout_seconds\x18) \x01(\rH\x19R\x11p2pTimeoutSeconds\x88\x01\x01\x127\n" + + "\x15relay_timeout_seconds\x18* \x01(\rH\x1aR\x13relayTimeoutSeconds\x88\x01\x01\x126\n" + + "\x15p2p_retry_max_seconds\x18+ \x01(\rH\x1bR\x12p2pRetryMaxSeconds\x88\x01\x01B\x13\n" + "\x11_rosenpassEnabledB\x10\n" + "\x0e_interfaceNameB\x10\n" + "\x0e_wireguardPortB\x17\n" + @@ -6597,7 +6797,11 @@ const file_daemon_proto_rawDesc = "" + "\x1d_enableSSHLocalPortForwardingB \n" + "\x1e_enableSSHRemotePortForwardingB\x11\n" + "\x0f_disableSSHAuthB\x11\n" + - "\x0f_sshJWTCacheTTL\"\x13\n" + + "\x0f_sshJWTCacheTTLB\x12\n" + + "\x10_connection_modeB\x16\n" + + "\x14_p2p_timeout_secondsB\x18\n" + + "\x16_relay_timeout_secondsB\x18\n" + + "\x16_p2p_retry_max_seconds\"\x13\n" + "\x11SetConfigResponse\"Q\n" + "\x11AddProfileRequest\x12\x1a\n" + "\busername\x18\x01 \x01(\tR\busername\x12 \n" + @@ -6896,121 +7100,122 @@ var file_daemon_proto_depIdxs = []int32{ 102, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp 102, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp 101, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration - 23, // 5: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo - 20, // 6: daemon.FullStatus.managementState:type_name -> daemon.ManagementState - 19, // 7: daemon.FullStatus.signalState:type_name -> daemon.SignalState - 18, // 8: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState - 17, // 9: daemon.FullStatus.peers:type_name -> 
daemon.PeerState - 21, // 10: daemon.FullStatus.relays:type_name -> daemon.RelayState - 22, // 11: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState - 55, // 12: daemon.FullStatus.events:type_name -> daemon.SystemEvent - 24, // 13: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState - 31, // 14: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 98, // 15: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 99, // 16: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range - 32, // 17: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo - 32, // 18: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo - 33, // 19: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule - 0, // 20: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel - 0, // 21: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel - 41, // 22: daemon.ListStatesResponse.states:type_name -> daemon.State - 50, // 23: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags - 52, // 24: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage - 2, // 25: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity - 3, // 26: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 102, // 27: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 100, // 28: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry - 55, // 29: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 101, // 30: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 68, // 31: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile - 1, // 32: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol - 91, // 33: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady - 101, // 34: daemon.StartCaptureRequest.duration:type_name -> google.protobuf.Duration 
- 101, // 35: daemon.StartBundleCaptureRequest.timeout:type_name -> google.protobuf.Duration - 30, // 36: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList - 5, // 37: daemon.DaemonService.Login:input_type -> daemon.LoginRequest - 7, // 38: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest - 9, // 39: daemon.DaemonService.Up:input_type -> daemon.UpRequest - 11, // 40: daemon.DaemonService.Status:input_type -> daemon.StatusRequest - 13, // 41: daemon.DaemonService.Down:input_type -> daemon.DownRequest - 15, // 42: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest - 26, // 43: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest - 28, // 44: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest - 28, // 45: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest - 4, // 46: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest - 35, // 47: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest - 37, // 48: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest - 39, // 49: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest - 42, // 50: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest - 44, // 51: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest - 46, // 52: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest - 48, // 53: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest - 51, // 54: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest - 92, // 55: daemon.DaemonService.StartCapture:input_type -> daemon.StartCaptureRequest - 94, // 56: daemon.DaemonService.StartBundleCapture:input_type -> daemon.StartBundleCaptureRequest - 96, // 57: daemon.DaemonService.StopBundleCapture:input_type -> daemon.StopBundleCaptureRequest - 54, // 58: 
daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest - 56, // 59: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest - 58, // 60: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest - 60, // 61: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest - 62, // 62: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest - 64, // 63: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest - 66, // 64: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest - 69, // 65: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest - 71, // 66: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest - 73, // 67: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest - 75, // 68: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest - 77, // 69: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest - 79, // 70: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest - 81, // 71: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 83, // 72: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest - 85, // 73: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest - 87, // 74: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 89, // 75: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest - 6, // 76: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 8, // 77: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 10, // 78: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 12, // 79: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 14, // 80: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 16, // 81: 
daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 27, // 82: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 29, // 83: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 29, // 84: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 34, // 85: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 36, // 86: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 38, // 87: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 40, // 88: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 43, // 89: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 45, // 90: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 47, // 91: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 49, // 92: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 53, // 93: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 93, // 94: daemon.DaemonService.StartCapture:output_type -> daemon.CapturePacket - 95, // 95: daemon.DaemonService.StartBundleCapture:output_type -> daemon.StartBundleCaptureResponse - 97, // 96: daemon.DaemonService.StopBundleCapture:output_type -> daemon.StopBundleCaptureResponse - 55, // 97: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 57, // 98: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 59, // 99: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 61, // 100: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 63, // 101: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 65, // 102: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 67, // 
103: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 70, // 104: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 72, // 105: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 74, // 106: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 76, // 107: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse - 78, // 108: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 80, // 109: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 82, // 110: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 84, // 111: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse - 86, // 112: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse - 88, // 113: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 90, // 114: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent - 76, // [76:115] is the sub-list for method output_type - 37, // [37:76] is the sub-list for method input_type - 37, // [37:37] is the sub-list for extension type_name - 37, // [37:37] is the sub-list for extension extendee - 0, // [0:37] is the sub-list for field type_name + 102, // 5: daemon.PeerState.iceBackoffNextRetry:type_name -> google.protobuf.Timestamp + 23, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo + 20, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState + 19, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState + 18, // 9: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState + 17, // 10: daemon.FullStatus.peers:type_name -> daemon.PeerState + 21, // 11: daemon.FullStatus.relays:type_name -> daemon.RelayState + 22, // 12: daemon.FullStatus.dns_servers:type_name -> 
daemon.NSGroupState + 55, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent + 24, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState + 31, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network + 98, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 99, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 32, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo + 32, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo + 33, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule + 0, // 21: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel + 0, // 22: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel + 41, // 23: daemon.ListStatesResponse.states:type_name -> daemon.State + 50, // 24: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags + 52, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage + 2, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity + 3, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category + 102, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 100, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 55, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent + 101, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 68, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile + 1, // 33: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol + 91, // 34: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady + 101, // 35: daemon.StartCaptureRequest.duration:type_name -> google.protobuf.Duration + 101, // 36: daemon.StartBundleCaptureRequest.timeout:type_name -> google.protobuf.Duration + 30, // 37: 
daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList + 5, // 38: daemon.DaemonService.Login:input_type -> daemon.LoginRequest + 7, // 39: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest + 9, // 40: daemon.DaemonService.Up:input_type -> daemon.UpRequest + 11, // 41: daemon.DaemonService.Status:input_type -> daemon.StatusRequest + 13, // 42: daemon.DaemonService.Down:input_type -> daemon.DownRequest + 15, // 43: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest + 26, // 44: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest + 28, // 45: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest + 28, // 46: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest + 4, // 47: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest + 35, // 48: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest + 37, // 49: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest + 39, // 50: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest + 42, // 51: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest + 44, // 52: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest + 46, // 53: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest + 48, // 54: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest + 51, // 55: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest + 92, // 56: daemon.DaemonService.StartCapture:input_type -> daemon.StartCaptureRequest + 94, // 57: daemon.DaemonService.StartBundleCapture:input_type -> daemon.StartBundleCaptureRequest + 96, // 58: daemon.DaemonService.StopBundleCapture:input_type -> daemon.StopBundleCaptureRequest + 54, // 59: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest + 56, // 60: 
daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest + 58, // 61: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest + 60, // 62: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest + 62, // 63: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest + 64, // 64: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest + 66, // 65: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest + 69, // 66: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest + 71, // 67: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest + 73, // 68: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest + 75, // 69: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest + 77, // 70: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest + 79, // 71: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest + 81, // 72: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest + 83, // 73: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 85, // 74: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 87, // 75: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 89, // 76: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest + 6, // 77: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 8, // 78: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 10, // 79: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 12, // 80: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 14, // 81: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 16, // 82: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 27, // 83: 
daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 29, // 84: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 29, // 85: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 34, // 86: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 36, // 87: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 38, // 88: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 40, // 89: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 43, // 90: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 45, // 91: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 47, // 92: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 49, // 93: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 53, // 94: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 93, // 95: daemon.DaemonService.StartCapture:output_type -> daemon.CapturePacket + 95, // 96: daemon.DaemonService.StartBundleCapture:output_type -> daemon.StartBundleCaptureResponse + 97, // 97: daemon.DaemonService.StopBundleCapture:output_type -> daemon.StopBundleCaptureResponse + 55, // 98: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 57, // 99: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 59, // 100: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 61, // 101: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 63, // 102: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 65, // 103: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 67, // 104: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 
70, // 105: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 72, // 106: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 74, // 107: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 76, // 108: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse + 78, // 109: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 80, // 110: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 82, // 111: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 84, // 112: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 86, // 113: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 88, // 114: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 90, // 115: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent + 77, // [77:116] is the sub-list for method output_type + 38, // [38:77] is the sub-list for method input_type + 38, // [38:38] is the sub-list for extension type_name + 38, // [38:38] is the sub-list for extension extendee + 0, // [0:38] is the sub-list for field type_name } func init() { file_daemon_proto_init() } diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 3fee9eca82d..fe87b63e793 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -204,6 +204,18 @@ message LoginRequest { optional bool enableSSHRemotePortForwarding = 37; optional bool disableSSHAuth = 38; optional int32 sshJWTCacheTTL = 39; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". 
+ optional string connection_mode = 40; + optional uint32 p2p_timeout_seconds = 41; + optional uint32 relay_timeout_seconds = 42; + + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + optional uint32 p2p_retry_max_seconds = 43; } message LoginResponse { @@ -311,6 +323,28 @@ message GetConfigResponse { bool disableSSHAuth = 25; int32 sshJWTCacheTTL = 26; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is the canonical lower-kebab-case name + // (relay-forced, p2p, p2p-lazy, p2p-dynamic, follow-server) or + // empty string when no local override is set. + string connection_mode = 27; + uint32 p2p_timeout_seconds = 28; + uint32 relay_timeout_seconds = 29; + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = no + // local override (daemon falls back to server-pushed value or built-in + // 15-min default). + uint32 p2p_retry_max_seconds = 30; + + // Phase 3.7h: values most recently pushed by the management server's + // PeerConfig (independent of any local override). The UI surfaces + // these so users can see what "Follow server" would inherit and which + // numeric defaults the empty-entry overrides fall back to. All four + // are 0/empty when the engine has not received PeerConfig yet. + string server_pushed_connection_mode = 31; + uint32 server_pushed_relay_timeout_seconds = 32; + uint32 server_pushed_p2p_timeout_seconds = 33; + uint32 server_pushed_p2p_retry_max_seconds = 34; } // PeerState contains the latest state of a peer @@ -333,6 +367,10 @@ message PeerState { google.protobuf.Duration latency = 17; string relayAddress = 18; bytes sshHostKey = 19; + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. 
+ int32 iceBackoffFailures = 20; + google.protobuf.Timestamp iceBackoffNextRetry = 21; + bool iceBackoffSuspended = 22; } // LocalPeerState contains the latest state of the local peer @@ -672,6 +710,18 @@ message SetConfigRequest { optional bool enableSSHRemotePortForwarding = 32; optional bool disableSSHAuth = 33; optional int32 sshJWTCacheTTL = 34; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + optional string connection_mode = 40; + optional uint32 p2p_timeout_seconds = 41; + optional uint32 relay_timeout_seconds = 42; + + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + optional uint32 p2p_retry_max_seconds = 43; } message SetConfigResponse{} diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index 66a8efcc325..d5c16ac56f5 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.6.1 -// - protoc v6.33.1 -// source: daemon.proto package proto @@ -15,50 +11,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. 
-const _ = grpc.SupportPackageIsVersion9 - -const ( - DaemonService_Login_FullMethodName = "/daemon.DaemonService/Login" - DaemonService_WaitSSOLogin_FullMethodName = "/daemon.DaemonService/WaitSSOLogin" - DaemonService_Up_FullMethodName = "/daemon.DaemonService/Up" - DaemonService_Status_FullMethodName = "/daemon.DaemonService/Status" - DaemonService_Down_FullMethodName = "/daemon.DaemonService/Down" - DaemonService_GetConfig_FullMethodName = "/daemon.DaemonService/GetConfig" - DaemonService_ListNetworks_FullMethodName = "/daemon.DaemonService/ListNetworks" - DaemonService_SelectNetworks_FullMethodName = "/daemon.DaemonService/SelectNetworks" - DaemonService_DeselectNetworks_FullMethodName = "/daemon.DaemonService/DeselectNetworks" - DaemonService_ForwardingRules_FullMethodName = "/daemon.DaemonService/ForwardingRules" - DaemonService_DebugBundle_FullMethodName = "/daemon.DaemonService/DebugBundle" - DaemonService_GetLogLevel_FullMethodName = "/daemon.DaemonService/GetLogLevel" - DaemonService_SetLogLevel_FullMethodName = "/daemon.DaemonService/SetLogLevel" - DaemonService_ListStates_FullMethodName = "/daemon.DaemonService/ListStates" - DaemonService_CleanState_FullMethodName = "/daemon.DaemonService/CleanState" - DaemonService_DeleteState_FullMethodName = "/daemon.DaemonService/DeleteState" - DaemonService_SetSyncResponsePersistence_FullMethodName = "/daemon.DaemonService/SetSyncResponsePersistence" - DaemonService_TracePacket_FullMethodName = "/daemon.DaemonService/TracePacket" - DaemonService_StartCapture_FullMethodName = "/daemon.DaemonService/StartCapture" - DaemonService_StartBundleCapture_FullMethodName = "/daemon.DaemonService/StartBundleCapture" - DaemonService_StopBundleCapture_FullMethodName = "/daemon.DaemonService/StopBundleCapture" - DaemonService_SubscribeEvents_FullMethodName = "/daemon.DaemonService/SubscribeEvents" - DaemonService_GetEvents_FullMethodName = "/daemon.DaemonService/GetEvents" - DaemonService_SwitchProfile_FullMethodName = 
"/daemon.DaemonService/SwitchProfile" - DaemonService_SetConfig_FullMethodName = "/daemon.DaemonService/SetConfig" - DaemonService_AddProfile_FullMethodName = "/daemon.DaemonService/AddProfile" - DaemonService_RemoveProfile_FullMethodName = "/daemon.DaemonService/RemoveProfile" - DaemonService_ListProfiles_FullMethodName = "/daemon.DaemonService/ListProfiles" - DaemonService_GetActiveProfile_FullMethodName = "/daemon.DaemonService/GetActiveProfile" - DaemonService_Logout_FullMethodName = "/daemon.DaemonService/Logout" - DaemonService_GetFeatures_FullMethodName = "/daemon.DaemonService/GetFeatures" - DaemonService_TriggerUpdate_FullMethodName = "/daemon.DaemonService/TriggerUpdate" - DaemonService_GetPeerSSHHostKey_FullMethodName = "/daemon.DaemonService/GetPeerSSHHostKey" - DaemonService_RequestJWTAuth_FullMethodName = "/daemon.DaemonService/RequestJWTAuth" - DaemonService_WaitJWTToken_FullMethodName = "/daemon.DaemonService/WaitJWTToken" - DaemonService_StartCPUProfile_FullMethodName = "/daemon.DaemonService/StartCPUProfile" - DaemonService_StopCPUProfile_FullMethodName = "/daemon.DaemonService/StopCPUProfile" - DaemonService_GetInstallerResult_FullMethodName = "/daemon.DaemonService/GetInstallerResult" - DaemonService_ExposeService_FullMethodName = "/daemon.DaemonService/ExposeService" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // DaemonServiceClient is the client API for DaemonService service. // @@ -101,13 +55,13 @@ type DaemonServiceClient interface { TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) // StartCapture begins streaming packet capture on the WireGuard interface. // Requires --enable-capture set at service install/reconfigure time. 
- StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) + StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (DaemonService_StartCaptureClient, error) // StartBundleCapture begins capturing packets to a server-side temp file // for inclusion in the next debug bundle. Auto-stops after the given timeout. StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) // StopBundleCapture stops the running bundle capture. Idempotent. StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) - SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) + SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) @@ -133,7 +87,7 @@ type DaemonServiceClient interface { StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) + ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) } type 
daemonServiceClient struct { @@ -145,9 +99,8 @@ func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { } func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LoginResponse) - err := c.cc.Invoke(ctx, DaemonService_Login_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Login", in, out, opts...) if err != nil { return nil, err } @@ -155,9 +108,8 @@ func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts } func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLoginRequest, opts ...grpc.CallOption) (*WaitSSOLoginResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitSSOLoginResponse) - err := c.cc.Invoke(ctx, DaemonService_WaitSSOLogin_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitSSOLogin", in, out, opts...) if err != nil { return nil, err } @@ -165,9 +117,8 @@ func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLogin } func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grpc.CallOption) (*UpResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(UpResponse) - err := c.cc.Invoke(ctx, DaemonService_Up_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Up", in, out, opts...) if err != nil { return nil, err } @@ -175,9 +126,8 @@ func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grp } func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StatusResponse) - err := c.cc.Invoke(ctx, DaemonService_Status_FullMethodName, in, out, cOpts...) 
+ err := c.cc.Invoke(ctx, "/daemon.DaemonService/Status", in, out, opts...) if err != nil { return nil, err } @@ -185,9 +135,8 @@ func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opt } func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (*DownResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DownResponse) - err := c.cc.Invoke(ctx, DaemonService_Down_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Down", in, out, opts...) if err != nil { return nil, err } @@ -195,9 +144,8 @@ func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts .. } func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetConfigResponse) - err := c.cc.Invoke(ctx, DaemonService_GetConfig_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetConfig", in, out, opts...) if err != nil { return nil, err } @@ -205,9 +153,8 @@ func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigReques } func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_ListNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListNetworks", in, out, opts...) 
if err != nil { return nil, err } @@ -215,9 +162,8 @@ func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworks } func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_SelectNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SelectNetworks", in, out, opts...) if err != nil { return nil, err } @@ -225,9 +171,8 @@ func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetw } func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_DeselectNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeselectNetworks", in, out, opts...) if err != nil { return nil, err } @@ -235,9 +180,8 @@ func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNe } func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*ForwardingRulesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ForwardingRulesResponse) - err := c.cc.Invoke(ctx, DaemonService_ForwardingRules_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ForwardingRules", in, out, opts...) 
if err != nil { return nil, err } @@ -245,9 +189,8 @@ func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequ } func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRequest, opts ...grpc.CallOption) (*DebugBundleResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DebugBundleResponse) - err := c.cc.Invoke(ctx, DaemonService_DebugBundle_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DebugBundle", in, out, opts...) if err != nil { return nil, err } @@ -255,9 +198,8 @@ func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRe } func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRequest, opts ...grpc.CallOption) (*GetLogLevelResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetLogLevelResponse) - err := c.cc.Invoke(ctx, DaemonService_GetLogLevel_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetLogLevel", in, out, opts...) if err != nil { return nil, err } @@ -265,9 +207,8 @@ func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRe } func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRequest, opts ...grpc.CallOption) (*SetLogLevelResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetLogLevelResponse) - err := c.cc.Invoke(ctx, DaemonService_SetLogLevel_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetLogLevel", in, out, opts...) if err != nil { return nil, err } @@ -275,9 +216,8 @@ func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRe } func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequest, opts ...grpc.CallOption) (*ListStatesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(ListStatesResponse) - err := c.cc.Invoke(ctx, DaemonService_ListStates_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListStates", in, out, opts...) if err != nil { return nil, err } @@ -285,9 +225,8 @@ func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequ } func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequest, opts ...grpc.CallOption) (*CleanStateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CleanStateResponse) - err := c.cc.Invoke(ctx, DaemonService_CleanState_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/CleanState", in, out, opts...) if err != nil { return nil, err } @@ -295,9 +234,8 @@ func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequ } func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*DeleteStateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteStateResponse) - err := c.cc.Invoke(ctx, DaemonService_DeleteState_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeleteState", in, out, opts...) if err != nil { return nil, err } @@ -305,9 +243,8 @@ func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRe } func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in *SetSyncResponsePersistenceRequest, opts ...grpc.CallOption) (*SetSyncResponsePersistenceResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetSyncResponsePersistenceResponse) - err := c.cc.Invoke(ctx, DaemonService_SetSyncResponsePersistence_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetSyncResponsePersistence", in, out, opts...) 
if err != nil { return nil, err } @@ -315,22 +252,20 @@ func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in } func (c *daemonServiceClient) TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TracePacketResponse) - err := c.cc.Invoke(ctx, DaemonService_TracePacket_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TracePacket", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_StartCapture_FullMethodName, cOpts...) +func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (DaemonService_StartCaptureClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], "/daemon.DaemonService/StartCapture", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[StartCaptureRequest, CapturePacket]{ClientStream: stream} + x := &daemonServiceStartCaptureClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -340,13 +275,26 @@ func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCapture return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type DaemonService_StartCaptureClient = grpc.ServerStreamingClient[CapturePacket] +type DaemonService_StartCaptureClient interface { + Recv() (*CapturePacket, error) + grpc.ClientStream +} + +type daemonServiceStartCaptureClient struct { + grpc.ClientStream +} + +func (x *daemonServiceStartCaptureClient) Recv() (*CapturePacket, error) { + m := new(CapturePacket) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *daemonServiceClient) StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StartBundleCaptureResponse) - err := c.cc.Invoke(ctx, DaemonService_StartBundleCapture_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartBundleCapture", in, out, opts...) if err != nil { return nil, err } @@ -354,22 +302,20 @@ func (c *daemonServiceClient) StartBundleCapture(ctx context.Context, in *StartB } func (c *daemonServiceClient) StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StopBundleCaptureResponse) - err := c.cc.Invoke(ctx, DaemonService_StopBundleCapture_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopBundleCapture", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], DaemonService_SubscribeEvents_FullMethodName, cOpts...) 
+func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], "/daemon.DaemonService/SubscribeEvents", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[SubscribeRequest, SystemEvent]{ClientStream: stream} + x := &daemonServiceSubscribeEventsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -379,13 +325,26 @@ func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *Subscribe return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type DaemonService_SubscribeEventsClient = grpc.ServerStreamingClient[SystemEvent] +type DaemonService_SubscribeEventsClient interface { + Recv() (*SystemEvent, error) + grpc.ClientStream +} + +type daemonServiceSubscribeEventsClient struct { + grpc.ClientStream +} + +func (x *daemonServiceSubscribeEventsClient) Recv() (*SystemEvent, error) { + m := new(SystemEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetEventsResponse) - err := c.cc.Invoke(ctx, DaemonService_GetEvents_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetEvents", in, out, opts...) 
if err != nil { return nil, err } @@ -393,9 +352,8 @@ func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsReques } func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SwitchProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_SwitchProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SwitchProfile", in, out, opts...) if err != nil { return nil, err } @@ -403,9 +361,8 @@ func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfi } func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetConfigResponse) - err := c.cc.Invoke(ctx, DaemonService_SetConfig_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetConfig", in, out, opts...) if err != nil { return nil, err } @@ -413,9 +370,8 @@ func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigReques } func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequest, opts ...grpc.CallOption) (*AddProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AddProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_AddProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/AddProfile", in, out, opts...) if err != nil { return nil, err } @@ -423,9 +379,8 @@ func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequ } func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfileRequest, opts ...grpc.CallOption) (*RemoveProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(RemoveProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_RemoveProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/RemoveProfile", in, out, opts...) if err != nil { return nil, err } @@ -433,9 +388,8 @@ func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfi } func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfilesRequest, opts ...grpc.CallOption) (*ListProfilesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListProfilesResponse) - err := c.cc.Invoke(ctx, DaemonService_ListProfiles_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListProfiles", in, out, opts...) if err != nil { return nil, err } @@ -443,9 +397,8 @@ func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfiles } func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiveProfileRequest, opts ...grpc.CallOption) (*GetActiveProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetActiveProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_GetActiveProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetActiveProfile", in, out, opts...) if err != nil { return nil, err } @@ -453,9 +406,8 @@ func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiv } func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LogoutResponse) - err := c.cc.Invoke(ctx, DaemonService_Logout_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Logout", in, out, opts...) 
if err != nil { return nil, err } @@ -463,9 +415,8 @@ func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opt } func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRequest, opts ...grpc.CallOption) (*GetFeaturesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetFeaturesResponse) - err := c.cc.Invoke(ctx, DaemonService_GetFeatures_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetFeatures", in, out, opts...) if err != nil { return nil, err } @@ -473,9 +424,8 @@ func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRe } func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TriggerUpdateResponse) - err := c.cc.Invoke(ctx, DaemonService_TriggerUpdate_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TriggerUpdate", in, out, opts...) if err != nil { return nil, err } @@ -483,9 +433,8 @@ func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpda } func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetPeerSSHHostKeyResponse) - err := c.cc.Invoke(ctx, DaemonService_GetPeerSSHHostKey_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetPeerSSHHostKey", in, out, opts...) 
if err != nil { return nil, err } @@ -493,9 +442,8 @@ func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeer } func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWTAuthRequest, opts ...grpc.CallOption) (*RequestJWTAuthResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RequestJWTAuthResponse) - err := c.cc.Invoke(ctx, DaemonService_RequestJWTAuth_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/RequestJWTAuth", in, out, opts...) if err != nil { return nil, err } @@ -503,9 +451,8 @@ func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWT } func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTTokenRequest, opts ...grpc.CallOption) (*WaitJWTTokenResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitJWTTokenResponse) - err := c.cc.Invoke(ctx, DaemonService_WaitJWTToken_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitJWTToken", in, out, opts...) if err != nil { return nil, err } @@ -513,9 +460,8 @@ func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTToken } func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StartCPUProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_StartCPUProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartCPUProfile", in, out, opts...) 
if err != nil { return nil, err } @@ -523,9 +469,8 @@ func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUP } func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StopCPUProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_StopCPUProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopCPUProfile", in, out, opts...) if err != nil { return nil, err } @@ -533,22 +478,20 @@ func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUPro } func (c *daemonServiceClient) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InstallerResultResponse) - err := c.cc.Invoke(ctx, DaemonService_GetInstallerResult_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetInstallerResult", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[2], DaemonService_ExposeService_FullMethodName, cOpts...) +func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[2], "/daemon.DaemonService/ExposeService", opts...) 
if err != nil { return nil, err } - x := &grpc.GenericClientStream[ExposeServiceRequest, ExposeServiceEvent]{ClientStream: stream} + x := &daemonServiceExposeServiceClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -558,12 +501,26 @@ func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServi return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type DaemonService_ExposeServiceClient = grpc.ServerStreamingClient[ExposeServiceEvent] +type DaemonService_ExposeServiceClient interface { + Recv() (*ExposeServiceEvent, error) + grpc.ClientStream +} + +type daemonServiceExposeServiceClient struct { + grpc.ClientStream +} + +func (x *daemonServiceExposeServiceClient) Recv() (*ExposeServiceEvent, error) { + m := new(ExposeServiceEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer -// for forward compatibility. +// for forward compatibility type DaemonServiceServer interface { // Login uses setup key to prepare configuration for the daemon. Login(context.Context, *LoginRequest) (*LoginResponse, error) @@ -602,13 +559,13 @@ type DaemonServiceServer interface { TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) // StartCapture begins streaming packet capture on the WireGuard interface. // Requires --enable-capture set at service install/reconfigure time. - StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error + StartCapture(*StartCaptureRequest, DaemonService_StartCaptureServer) error // StartBundleCapture begins capturing packets to a server-side temp file // for inclusion in the next debug bundle. Auto-stops after the given timeout. 
StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) // StopBundleCapture stops the running bundle capture. Idempotent. StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) - SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error + SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) @@ -634,136 +591,132 @@ type DaemonServiceServer interface { StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error + ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error mustEmbedUnimplementedDaemonServiceServer() } -// UnimplementedDaemonServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedDaemonServiceServer struct{} +// UnimplementedDaemonServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedDaemonServiceServer struct { +} func (UnimplementedDaemonServiceServer) Login(context.Context, *LoginRequest) (*LoginResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Login not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Login not implemented") } func (UnimplementedDaemonServiceServer) WaitSSOLogin(context.Context, *WaitSSOLoginRequest) (*WaitSSOLoginResponse, error) { - return nil, status.Error(codes.Unimplemented, "method WaitSSOLogin not implemented") + return nil, status.Errorf(codes.Unimplemented, "method WaitSSOLogin not implemented") } func (UnimplementedDaemonServiceServer) Up(context.Context, *UpRequest) (*UpResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Up not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Up not implemented") } func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Status not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") } func (UnimplementedDaemonServiceServer) Down(context.Context, *DownRequest) (*DownResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Down not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Down not implemented") } func (UnimplementedDaemonServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetConfig not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetConfig not implemented") } func (UnimplementedDaemonServiceServer) ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListNetworks not implemented") } func 
(UnimplementedDaemonServiceServer) SelectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SelectNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SelectNetworks not implemented") } func (UnimplementedDaemonServiceServer) DeselectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeselectNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DeselectNetworks not implemented") } func (UnimplementedDaemonServiceServer) ForwardingRules(context.Context, *EmptyRequest) (*ForwardingRulesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ForwardingRules not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ForwardingRules not implemented") } func (UnimplementedDaemonServiceServer) DebugBundle(context.Context, *DebugBundleRequest) (*DebugBundleResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DebugBundle not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DebugBundle not implemented") } func (UnimplementedDaemonServiceServer) GetLogLevel(context.Context, *GetLogLevelRequest) (*GetLogLevelResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetLogLevel not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) SetLogLevel(context.Context, *SetLogLevelRequest) (*SetLogLevelResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetLogLevel not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) ListStates(context.Context, *ListStatesRequest) (*ListStatesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method 
ListStates not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListStates not implemented") } func (UnimplementedDaemonServiceServer) CleanState(context.Context, *CleanStateRequest) (*CleanStateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method CleanState not implemented") + return nil, status.Errorf(codes.Unimplemented, "method CleanState not implemented") } func (UnimplementedDaemonServiceServer) DeleteState(context.Context, *DeleteStateRequest) (*DeleteStateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeleteState not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DeleteState not implemented") } func (UnimplementedDaemonServiceServer) SetSyncResponsePersistence(context.Context, *SetSyncResponsePersistenceRequest) (*SetSyncResponsePersistenceResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") } func (UnimplementedDaemonServiceServer) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) { - return nil, status.Error(codes.Unimplemented, "method TracePacket not implemented") + return nil, status.Errorf(codes.Unimplemented, "method TracePacket not implemented") } -func (UnimplementedDaemonServiceServer) StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error { - return status.Error(codes.Unimplemented, "method StartCapture not implemented") +func (UnimplementedDaemonServiceServer) StartCapture(*StartCaptureRequest, DaemonService_StartCaptureServer) error { + return status.Errorf(codes.Unimplemented, "method StartCapture not implemented") } func (UnimplementedDaemonServiceServer) StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) { - return nil, status.Error(codes.Unimplemented, "method 
StartBundleCapture not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StartBundleCapture not implemented") } func (UnimplementedDaemonServiceServer) StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StopBundleCapture not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StopBundleCapture not implemented") } -func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error { - return status.Error(codes.Unimplemented, "method SubscribeEvents not implemented") +func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeEvents not implemented") } func (UnimplementedDaemonServiceServer) GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetEvents not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetEvents not implemented") } func (UnimplementedDaemonServiceServer) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SwitchProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SwitchProfile not implemented") } func (UnimplementedDaemonServiceServer) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetConfig not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetConfig not implemented") } func (UnimplementedDaemonServiceServer) AddProfile(context.Context, *AddProfileRequest) (*AddProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method AddProfile not implemented") + return nil, 
status.Errorf(codes.Unimplemented, "method AddProfile not implemented") } func (UnimplementedDaemonServiceServer) RemoveProfile(context.Context, *RemoveProfileRequest) (*RemoveProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method RemoveProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method RemoveProfile not implemented") } func (UnimplementedDaemonServiceServer) ListProfiles(context.Context, *ListProfilesRequest) (*ListProfilesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListProfiles not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListProfiles not implemented") } func (UnimplementedDaemonServiceServer) GetActiveProfile(context.Context, *GetActiveProfileRequest) (*GetActiveProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetActiveProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetActiveProfile not implemented") } func (UnimplementedDaemonServiceServer) Logout(context.Context, *LogoutRequest) (*LogoutResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Logout not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") } func (UnimplementedDaemonServiceServer) GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetFeatures not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetFeatures not implemented") } func (UnimplementedDaemonServiceServer) TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method TriggerUpdate not implemented") + return nil, status.Errorf(codes.Unimplemented, "method TriggerUpdate not implemented") } func (UnimplementedDaemonServiceServer) GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) 
(*GetPeerSSHHostKeyResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") } func (UnimplementedDaemonServiceServer) RequestJWTAuth(context.Context, *RequestJWTAuthRequest) (*RequestJWTAuthResponse, error) { - return nil, status.Error(codes.Unimplemented, "method RequestJWTAuth not implemented") + return nil, status.Errorf(codes.Unimplemented, "method RequestJWTAuth not implemented") } func (UnimplementedDaemonServiceServer) WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) { - return nil, status.Error(codes.Unimplemented, "method WaitJWTToken not implemented") + return nil, status.Errorf(codes.Unimplemented, "method WaitJWTToken not implemented") } func (UnimplementedDaemonServiceServer) StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StartCPUProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StartCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StopCPUProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StopCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetInstallerResult not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetInstallerResult not implemented") } -func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error { - return status.Error(codes.Unimplemented, "method ExposeService not 
implemented") +func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error { + return status.Errorf(codes.Unimplemented, "method ExposeService not implemented") } func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} -func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} // UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to DaemonServiceServer will @@ -773,13 +726,6 @@ type UnsafeDaemonServiceServer interface { } func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { - // If the following call panics, it indicates UnimplementedDaemonServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. 
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&DaemonService_ServiceDesc, srv) } @@ -793,7 +739,7 @@ func _DaemonService_Login_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Login_FullMethodName, + FullMethod: "/daemon.DaemonService/Login", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Login(ctx, req.(*LoginRequest)) @@ -811,7 +757,7 @@ func _DaemonService_WaitSSOLogin_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_WaitSSOLogin_FullMethodName, + FullMethod: "/daemon.DaemonService/WaitSSOLogin", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).WaitSSOLogin(ctx, req.(*WaitSSOLoginRequest)) @@ -829,7 +775,7 @@ func _DaemonService_Up_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Up_FullMethodName, + FullMethod: "/daemon.DaemonService/Up", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Up(ctx, req.(*UpRequest)) @@ -847,7 +793,7 @@ func _DaemonService_Status_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Status_FullMethodName, + FullMethod: "/daemon.DaemonService/Status", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Status(ctx, req.(*StatusRequest)) @@ -865,7 +811,7 @@ func _DaemonService_Down_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Down_FullMethodName, + FullMethod: "/daemon.DaemonService/Down", } handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Down(ctx, req.(*DownRequest)) @@ -883,7 +829,7 @@ func _DaemonService_GetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetConfig_FullMethodName, + FullMethod: "/daemon.DaemonService/GetConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) @@ -901,7 +847,7 @@ func _DaemonService_ListNetworks_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/ListNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListNetworks(ctx, req.(*ListNetworksRequest)) @@ -919,7 +865,7 @@ func _DaemonService_SelectNetworks_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SelectNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/SelectNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SelectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -937,7 +883,7 @@ func _DaemonService_DeselectNetworks_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DeselectNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/DeselectNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeselectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -955,7 +901,7 @@ func _DaemonService_ForwardingRules_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ForwardingRules_FullMethodName, + FullMethod: 
"/daemon.DaemonService/ForwardingRules", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ForwardingRules(ctx, req.(*EmptyRequest)) @@ -973,7 +919,7 @@ func _DaemonService_DebugBundle_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DebugBundle_FullMethodName, + FullMethod: "/daemon.DaemonService/DebugBundle", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DebugBundle(ctx, req.(*DebugBundleRequest)) @@ -991,7 +937,7 @@ func _DaemonService_GetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetLogLevel_FullMethodName, + FullMethod: "/daemon.DaemonService/GetLogLevel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetLogLevel(ctx, req.(*GetLogLevelRequest)) @@ -1009,7 +955,7 @@ func _DaemonService_SetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetLogLevel_FullMethodName, + FullMethod: "/daemon.DaemonService/SetLogLevel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetLogLevel(ctx, req.(*SetLogLevelRequest)) @@ -1027,7 +973,7 @@ func _DaemonService_ListStates_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListStates_FullMethodName, + FullMethod: "/daemon.DaemonService/ListStates", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListStates(ctx, req.(*ListStatesRequest)) @@ -1045,7 +991,7 @@ func _DaemonService_CleanState_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: 
DaemonService_CleanState_FullMethodName, + FullMethod: "/daemon.DaemonService/CleanState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).CleanState(ctx, req.(*CleanStateRequest)) @@ -1063,7 +1009,7 @@ func _DaemonService_DeleteState_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DeleteState_FullMethodName, + FullMethod: "/daemon.DaemonService/DeleteState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeleteState(ctx, req.(*DeleteStateRequest)) @@ -1081,7 +1027,7 @@ func _DaemonService_SetSyncResponsePersistence_Handler(srv interface{}, ctx cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetSyncResponsePersistence_FullMethodName, + FullMethod: "/daemon.DaemonService/SetSyncResponsePersistence", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetSyncResponsePersistence(ctx, req.(*SetSyncResponsePersistenceRequest)) @@ -1099,7 +1045,7 @@ func _DaemonService_TracePacket_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_TracePacket_FullMethodName, + FullMethod: "/daemon.DaemonService/TracePacket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TracePacket(ctx, req.(*TracePacketRequest)) @@ -1112,11 +1058,21 @@ func _DaemonService_StartCapture_Handler(srv interface{}, stream grpc.ServerStre if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).StartCapture(m, &grpc.GenericServerStream[StartCaptureRequest, CapturePacket]{ServerStream: stream}) + return srv.(DaemonServiceServer).StartCapture(m, &daemonServiceStartCaptureServer{stream}) } -// This type alias is provided for backwards compatibility with 
existing code that references the prior non-generic stream type by name. -type DaemonService_StartCaptureServer = grpc.ServerStreamingServer[CapturePacket] +type DaemonService_StartCaptureServer interface { + Send(*CapturePacket) error + grpc.ServerStream +} + +type daemonServiceStartCaptureServer struct { + grpc.ServerStream +} + +func (x *daemonServiceStartCaptureServer) Send(m *CapturePacket) error { + return x.ServerStream.SendMsg(m) +} func _DaemonService_StartBundleCapture_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartBundleCaptureRequest) @@ -1128,7 +1084,7 @@ func _DaemonService_StartBundleCapture_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StartBundleCapture_FullMethodName, + FullMethod: "/daemon.DaemonService/StartBundleCapture", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StartBundleCapture(ctx, req.(*StartBundleCaptureRequest)) @@ -1146,7 +1102,7 @@ func _DaemonService_StopBundleCapture_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StopBundleCapture_FullMethodName, + FullMethod: "/daemon.DaemonService/StopBundleCapture", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StopBundleCapture(ctx, req.(*StopBundleCaptureRequest)) @@ -1159,11 +1115,21 @@ func _DaemonService_SubscribeEvents_Handler(srv interface{}, stream grpc.ServerS if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).SubscribeEvents(m, &grpc.GenericServerStream[SubscribeRequest, SystemEvent]{ServerStream: stream}) + return srv.(DaemonServiceServer).SubscribeEvents(m, &daemonServiceSubscribeEventsServer{stream}) } -// This type alias is provided for backwards compatibility with 
existing code that references the prior non-generic stream type by name. -type DaemonService_SubscribeEventsServer = grpc.ServerStreamingServer[SystemEvent] +type DaemonService_SubscribeEventsServer interface { + Send(*SystemEvent) error + grpc.ServerStream +} + +type daemonServiceSubscribeEventsServer struct { + grpc.ServerStream +} + +func (x *daemonServiceSubscribeEventsServer) Send(m *SystemEvent) error { + return x.ServerStream.SendMsg(m) +} func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetEventsRequest) @@ -1175,7 +1141,7 @@ func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetEvents_FullMethodName, + FullMethod: "/daemon.DaemonService/GetEvents", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetEvents(ctx, req.(*GetEventsRequest)) @@ -1193,7 +1159,7 @@ func _DaemonService_SwitchProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SwitchProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/SwitchProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SwitchProfile(ctx, req.(*SwitchProfileRequest)) @@ -1211,7 +1177,7 @@ func _DaemonService_SetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetConfig_FullMethodName, + FullMethod: "/daemon.DaemonService/SetConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetConfig(ctx, req.(*SetConfigRequest)) @@ -1229,7 +1195,7 @@ func _DaemonService_AddProfile_Handler(srv interface{}, ctx context.Context, dec } info := 
&grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_AddProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/AddProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).AddProfile(ctx, req.(*AddProfileRequest)) @@ -1247,7 +1213,7 @@ func _DaemonService_RemoveProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_RemoveProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/RemoveProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RemoveProfile(ctx, req.(*RemoveProfileRequest)) @@ -1265,7 +1231,7 @@ func _DaemonService_ListProfiles_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListProfiles_FullMethodName, + FullMethod: "/daemon.DaemonService/ListProfiles", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListProfiles(ctx, req.(*ListProfilesRequest)) @@ -1283,7 +1249,7 @@ func _DaemonService_GetActiveProfile_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetActiveProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/GetActiveProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetActiveProfile(ctx, req.(*GetActiveProfileRequest)) @@ -1301,7 +1267,7 @@ func _DaemonService_Logout_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Logout_FullMethodName, + FullMethod: "/daemon.DaemonService/Logout", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Logout(ctx, req.(*LogoutRequest)) @@ -1319,7 +1285,7 @@ func 
_DaemonService_GetFeatures_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetFeatures_FullMethodName, + FullMethod: "/daemon.DaemonService/GetFeatures", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetFeatures(ctx, req.(*GetFeaturesRequest)) @@ -1337,7 +1303,7 @@ func _DaemonService_TriggerUpdate_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_TriggerUpdate_FullMethodName, + FullMethod: "/daemon.DaemonService/TriggerUpdate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TriggerUpdate(ctx, req.(*TriggerUpdateRequest)) @@ -1355,7 +1321,7 @@ func _DaemonService_GetPeerSSHHostKey_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetPeerSSHHostKey_FullMethodName, + FullMethod: "/daemon.DaemonService/GetPeerSSHHostKey", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetPeerSSHHostKey(ctx, req.(*GetPeerSSHHostKeyRequest)) @@ -1373,7 +1339,7 @@ func _DaemonService_RequestJWTAuth_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_RequestJWTAuth_FullMethodName, + FullMethod: "/daemon.DaemonService/RequestJWTAuth", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RequestJWTAuth(ctx, req.(*RequestJWTAuthRequest)) @@ -1391,7 +1357,7 @@ func _DaemonService_WaitJWTToken_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_WaitJWTToken_FullMethodName, + FullMethod: "/daemon.DaemonService/WaitJWTToken", } handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ return srv.(DaemonServiceServer).WaitJWTToken(ctx, req.(*WaitJWTTokenRequest)) @@ -1409,7 +1375,7 @@ func _DaemonService_StartCPUProfile_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StartCPUProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/StartCPUProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StartCPUProfile(ctx, req.(*StartCPUProfileRequest)) @@ -1427,7 +1393,7 @@ func _DaemonService_StopCPUProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StopCPUProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/StopCPUProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StopCPUProfile(ctx, req.(*StopCPUProfileRequest)) @@ -1445,7 +1411,7 @@ func _DaemonService_GetInstallerResult_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetInstallerResult_FullMethodName, + FullMethod: "/daemon.DaemonService/GetInstallerResult", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetInstallerResult(ctx, req.(*InstallerResultRequest)) @@ -1458,11 +1424,21 @@ func _DaemonService_ExposeService_Handler(srv interface{}, stream grpc.ServerStr if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).ExposeService(m, &grpc.GenericServerStream[ExposeServiceRequest, ExposeServiceEvent]{ServerStream: stream}) + return srv.(DaemonServiceServer).ExposeService(m, &daemonServiceExposeServiceServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type DaemonService_ExposeServiceServer = grpc.ServerStreamingServer[ExposeServiceEvent] +type DaemonService_ExposeServiceServer interface { + Send(*ExposeServiceEvent) error + grpc.ServerStream +} + +type daemonServiceExposeServiceServer struct { + grpc.ServerStream +} + +func (x *daemonServiceExposeServiceServer) Send(m *ExposeServiceEvent) error { + return x.ServerStream.SendMsg(m) +} // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. // It's only intended for direct use with grpc.RegisterService, diff --git a/client/server/server.go b/client/server/server.go index 648ffa8ce6a..2e985a7a66c 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1515,31 +1515,61 @@ func (s *Server) GetConfig(ctx context.Context, req *proto.GetConfigRequest) (*p sshJWTCacheTTL = int32(*cfg.SSHJWTCacheTTL) } + // Surface what the management server most recently pushed via + // PeerConfig so the UI can show "Follow server (currently: ...)" + // and use the numeric defaults as placeholders in the override + // fields. All zero/empty when the engine has not received PeerConfig + // yet -- the UI handles that gracefully. 
+ var ( + spMode string + spRelayTOSecs uint32 + spP2pTOSecs uint32 + spP2pRetMax uint32 + ) + if s.connectClient != nil { + if eng := s.connectClient.Engine(); eng != nil { + if cm := eng.ConnMgr(); cm != nil { + spMode = cm.ServerPushedMode().String() + spRelayTOSecs = cm.ServerPushedRelayTimeoutSecs() + spP2pTOSecs = cm.ServerPushedP2pTimeoutSecs() + spP2pRetMax = cm.ServerPushedP2pRetryMaxSecs() + } + } + } + return &proto.GetConfigResponse{ - ManagementUrl: managementURL.String(), - PreSharedKey: preSharedKey, - AdminURL: adminURL.String(), - InterfaceName: cfg.WgIface, - WireguardPort: int64(cfg.WgPort), - Mtu: int64(cfg.MTU), - DisableAutoConnect: cfg.DisableAutoConnect, - ServerSSHAllowed: *cfg.ServerSSHAllowed, - RosenpassEnabled: cfg.RosenpassEnabled, - RosenpassPermissive: cfg.RosenpassPermissive, - LazyConnectionEnabled: cfg.LazyConnectionEnabled, - BlockInbound: cfg.BlockInbound, - DisableNotifications: disableNotifications, - NetworkMonitor: networkMonitor, - DisableDns: disableDNS, - DisableClientRoutes: disableClientRoutes, - DisableServerRoutes: disableServerRoutes, - BlockLanAccess: blockLANAccess, - EnableSSHRoot: enableSSHRoot, - EnableSSHSFTP: enableSSHSFTP, - EnableSSHLocalPortForwarding: enableSSHLocalPortForwarding, - EnableSSHRemotePortForwarding: enableSSHRemotePortForwarding, - DisableSSHAuth: disableSSHAuth, - SshJWTCacheTTL: sshJWTCacheTTL, + ManagementUrl: managementURL.String(), + PreSharedKey: preSharedKey, + AdminURL: adminURL.String(), + InterfaceName: cfg.WgIface, + WireguardPort: int64(cfg.WgPort), + Mtu: int64(cfg.MTU), + DisableAutoConnect: cfg.DisableAutoConnect, + ServerSSHAllowed: *cfg.ServerSSHAllowed, + RosenpassEnabled: cfg.RosenpassEnabled, + RosenpassPermissive: cfg.RosenpassPermissive, + LazyConnectionEnabled: cfg.LazyConnectionEnabled, + BlockInbound: cfg.BlockInbound, + DisableNotifications: disableNotifications, + NetworkMonitor: networkMonitor, + DisableDns: disableDNS, + DisableClientRoutes: disableClientRoutes, + 
DisableServerRoutes: disableServerRoutes, + BlockLanAccess: blockLANAccess, + EnableSSHRoot: enableSSHRoot, + EnableSSHSFTP: enableSSHSFTP, + EnableSSHLocalPortForwarding: enableSSHLocalPortForwarding, + EnableSSHRemotePortForwarding: enableSSHRemotePortForwarding, + DisableSSHAuth: disableSSHAuth, + SshJWTCacheTTL: sshJWTCacheTTL, + ConnectionMode: cfg.ConnectionMode, + P2PTimeoutSeconds: cfg.P2pTimeoutSeconds, + RelayTimeoutSeconds: cfg.RelayTimeoutSeconds, + P2PRetryMaxSeconds: cfg.P2pRetryMaxSeconds, + ServerPushedConnectionMode: spMode, + ServerPushedRelayTimeoutSeconds: spRelayTOSecs, + ServerPushedP2PTimeoutSeconds: spP2pTOSecs, + ServerPushedP2PRetryMaxSeconds: spP2pRetMax, }, nil } diff --git a/client/server/setconfig_test.go b/client/server/setconfig_test.go index b90b5653dc4..9d8ce003e5b 100644 --- a/client/server/setconfig_test.go +++ b/client/server/setconfig_test.go @@ -201,6 +201,17 @@ func verifyAllFieldsCovered(t *testing.T, req *proto.SetConfigRequest) { "EnableSSHRemotePortForwarding": true, "DisableSSHAuth": true, "SshJWTCacheTTL": true, + // Phase 3.7i Connection-Mode fields. Currently in the proto so + // daemons can advertise them via GetConfig, but SetConfig does + // NOT apply them at runtime — they're only persisted via + // `netbird service install/reconfigure --connection-mode/...` + // (writes the active profile file directly; daemon picks up on + // next start). Wiring them through SetConfig is a follow-up + // task. Listed here so the structural test passes. + "ConnectionMode": true, + "P2PTimeoutSeconds": true, + "RelayTimeoutSeconds": true, + "P2PRetryMaxSeconds": true, } val := reflect.ValueOf(req).Elem() @@ -265,6 +276,17 @@ func TestCLIFlags_MappedToSetConfig(t *testing.T) { // SetConfigRequest fields that don't have CLI flags (settable only via UI or other means). 
fieldsWithoutCLIFlags := map[string]bool{ "DisableNotifications": true, // Only settable via UI + // Phase 3.7i Connection-Mode fields: have CLI flags + // (--connection-mode, --relay-timeout, --p2p-timeout, + // --p2p-retry-max) but those flags belong to the + // `netbird service install/reconfigure` command, not `up`, + // and they bypass the SetConfig RPC entirely (write directly + // to the active profile file). So from this test's + // perspective they have no SetConfig-mapped CLI flag. + "ConnectionMode": true, + "P2PTimeoutSeconds": true, + "RelayTimeoutSeconds": true, + "P2PRetryMaxSeconds": true, } // Get all SetConfigRequest fields to verify our map is complete. diff --git a/client/status/status.go b/client/status/status.go index 8c932bbab29..60da8f303f9 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -73,6 +73,10 @@ type PeerStateDetailOutput struct { Latency time.Duration `json:"latency" yaml:"latency"` RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"` Networks []string `json:"networks" yaml:"networks"` + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. 
+ IceBackoffFailures int `json:"iceBackoffFailures" yaml:"iceBackoffFailures"` + IceBackoffNextRetry time.Time `json:"iceBackoffNextRetry" yaml:"iceBackoffNextRetry"` + IceBackoffSuspended bool `json:"iceBackoffSuspended" yaml:"iceBackoffSuspended"` } type PeersStateOutput struct { @@ -337,6 +341,9 @@ func mapPeers( Latency: pbPeerState.GetLatency().AsDuration(), RosenpassEnabled: pbPeerState.GetRosenpassEnabled(), Networks: pbPeerState.GetNetworks(), + IceBackoffFailures: int(pbPeerState.GetIceBackoffFailures()), + IceBackoffNextRetry: iceBackoffNextRetry(pbPeerState), + IceBackoffSuspended: pbPeerState.GetIceBackoffSuspended(), } peersStateDetail = append(peersStateDetail, peerState) @@ -645,6 +652,9 @@ func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { Networks: maps.Keys(peerState.GetRoutes()), Latency: durationpb.New(peerState.Latency), SshHostKey: peerState.SSHHostKey, + IceBackoffFailures: int32(peerState.IceBackoffFailures), + IceBackoffNextRetry: timestamppb.New(peerState.IceBackoffNextRetry), + IceBackoffSuspended: peerState.IceBackoffSuspended, } pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) } @@ -683,6 +693,17 @@ func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { return &pbFullStatus } +// iceBackoffNextRetry returns the ICE backoff next-retry time from a proto +// PeerState. If the timestamp field is unset (nil), it returns Go's zero +// time to match the daemon's zero-valued State.IceBackoffNextRetry. 
+func iceBackoffNextRetry(pbPeerState *proto.PeerState) time.Time { + ts := pbPeerState.GetIceBackoffNextRetry() + if ts == nil { + return time.Time{} + } + return ts.AsTime().Local() +} + func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bool) string { var ( peersString = "" @@ -768,6 +789,21 @@ func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bo peerState.Latency.String(), ) + // Phase 3 (#5989): append ICE-backoff line only when suspended AND + // the suspension has not yet expired by wall-clock. The PeerState + // snapshot is only refreshed on ICE state-change events, so the + // suspended-flag stays true even after nextRetry has passed; the + // time-check here suppresses the noise for already-expired windows. + if peerState.IceBackoffSuspended && time.Now().Before(peerState.IceBackoffNextRetry) { + remaining := time.Until(peerState.IceBackoffNextRetry).Round(time.Second) + peerString += fmt.Sprintf( + " ICE backoff: suspended for %s (failure #%d, retry at %s)\n", + remaining, + peerState.IceBackoffFailures, + peerState.IceBackoffNextRetry.Format("15:04:05"), + ) + } + peersString += peerString } return peersString diff --git a/client/status/status_test.go b/client/status/status_test.go index 7754eebae97..5c99461b551 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -304,7 +304,10 @@ func TestParsingToJSON(t *testing.T) { "quantumResistance": false, "networks": [ "10.1.0.0/24" - ] + ], + "iceBackoffFailures": 0, + "iceBackoffNextRetry": "0001-01-01T00:00:00Z", + "iceBackoffSuspended": false }, { "fqdn": "peer-2.awesome-domain.com", @@ -327,7 +330,10 @@ func TestParsingToJSON(t *testing.T) { "transferSent": 1000, "latency": 10000000, "quantumResistance": false, - "networks": null + "networks": null, + "iceBackoffFailures": 0, + "iceBackoffNextRetry": "0001-01-01T00:00:00Z", + "iceBackoffSuspended": false } ] }, @@ -436,6 +442,9 @@ func TestParsingToYAML(t *testing.T) { 
quantumResistance: false networks: - 10.1.0.0/24 + iceBackoffFailures: 0 + iceBackoffNextRetry: 0001-01-01T00:00:00Z + iceBackoffSuspended: false - fqdn: peer-2.awesome-domain.com netbirdIp: 192.168.178.102 publicKey: Pubkey2 @@ -455,6 +464,9 @@ func TestParsingToYAML(t *testing.T) { latency: 10ms quantumResistance: false networks: [] + iceBackoffFailures: 0 + iceBackoffNextRetry: 0001-01-01T00:00:00Z + iceBackoffSuspended: false cliVersion: development daemonVersion: 0.14.1 daemonStatus: Connected diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 28f98ae59ae..40349262aa8 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -251,7 +251,6 @@ type serviceClient struct { mAllowSSH *systray.MenuItem mAutoConnect *systray.MenuItem mEnableRosenpass *systray.MenuItem - mLazyConnEnabled *systray.MenuItem mBlockInbound *systray.MenuItem mNotifications *systray.MenuItem mAdvancedSettings *systray.MenuItem @@ -287,6 +286,27 @@ type serviceClient struct { sDisableSSHAuth *widget.Check iSSHJWTCacheTTL *widget.Entry + // Phase 1+ ConnectionMode selector + per-mode timeout overrides. + // Defaulting to "Follow server" leaves the local override empty so + // the daemon uses whatever the management server pushes. + sConnectionMode *widget.Select + iRelayTimeout *widget.Entry + iP2pTimeout *widget.Entry + iP2pRetryMax *widget.Entry + connectionMode string + relayTimeoutSecs uint32 + p2pTimeoutSecs uint32 + p2pRetryMaxSecs uint32 + + // Phase 3.7h: latest values pushed by the management server, captured + // from GetConfigResponse.ServerPushed*. Used to render the + // "Follow server (currently: )" entry in the dropdown and the + // "use server default (Ns)" hints in the timeout entries. + serverPushedMode string + serverPushedRelayTimeoutSecs uint32 + serverPushedP2pTimeoutSecs uint32 + serverPushedP2pRetryMaxSecs uint32 + // observable settings over corresponding iMngURL and iPreSharedKey values. 
managementURL string preSharedKey string @@ -476,6 +496,19 @@ func (s *serviceClient) showSettingsUI() { s.sDisableSSHAuth = widget.NewCheck("Disable SSH Authentication", nil) s.iSSHJWTCacheTTL = widget.NewEntry() + // Connection-mode override + per-mode timeout fields. + // Order matches the Android spinner so behaviour is consistent. + s.sConnectionMode = widget.NewSelect( + []string{"Follow server", "relay-forced", "p2p", "p2p-lazy", "p2p-dynamic"}, + func(string) { s.updateTimeoutEntriesEnabled() }, + ) + s.iRelayTimeout = widget.NewEntry() + s.iRelayTimeout.SetPlaceHolder("seconds (empty = use server default)") + s.iP2pTimeout = widget.NewEntry() + s.iP2pTimeout.SetPlaceHolder("seconds (empty = use server default)") + s.iP2pRetryMax = widget.NewEntry() + s.iP2pRetryMax.SetPlaceHolder("seconds (empty = use server default)") + s.wSettings.SetContent(s.getSettingsForm()) s.wSettings.Resize(fyne.NewSize(600, 400)) s.wSettings.SetFixedSize(true) @@ -586,9 +619,52 @@ func (s *serviceClient) hasSettingsChanged(iMngURL string, port, mtu int64) bool s.disableClientRoutes != s.sDisableClientRoutes.Checked || s.disableServerRoutes != s.sDisableServerRoutes.Checked || s.blockLANAccess != s.sBlockLANAccess.Checked || + s.hasConnectionModeChanges() || s.hasSSHChanges() } +// hasConnectionModeChanges reports whether the user touched the +// Connection Mode dropdown or any of the timeout entries on the +// Network tab. Empty / non-numeric timeout entries map to 0 +// (= no override). 
+func (s *serviceClient) hasConnectionModeChanges() bool { + if s.sConnectionMode == nil { + return false + } + desired := s.selectedConnectionMode() + if s.connectionMode != desired { + return true + } + return s.relayTimeoutSecs != parseUint32Field(s.iRelayTimeout.Text) || + s.p2pTimeoutSecs != parseUint32Field(s.iP2pTimeout.Text) || + s.p2pRetryMaxSecs != parseUint32Field(s.iP2pRetryMax.Text) +} + +// selectedConnectionMode returns the canonical mode string for the +// current dropdown selection. The "Follow server" entry maps to empty +// (clears any local override). It may carry a "(currently: )" +// suffix when the engine has received a PeerConfig, so we match by +// prefix. +func (s *serviceClient) selectedConnectionMode() string { + v := s.sConnectionMode.Selected + if v == "" || strings.HasPrefix(v, "Follow server") { + return "" + } + return v +} + +func parseUint32Field(text string) uint32 { + t := strings.TrimSpace(text) + if t == "" { + return 0 + } + v, err := strconv.ParseUint(t, 10, 32) + if err != nil { + return 0 + } + return uint32(v) +} + func (s *serviceClient) applySettingsChanges(iMngURL string, port, mtu int64) error { s.managementURL = iMngURL s.preSharedKey = s.iPreSharedKey.Text @@ -662,6 +738,17 @@ func (s *serviceClient) buildSetConfigRequest(iMngURL string, port, mtu int64) ( req.OptionalPreSharedKey = &s.iPreSharedKey.Text } + // Connection-mode override + per-mode timeouts. Empty connection_mode + // clears any local override (= "Follow server"). 
+ connMode := s.selectedConnectionMode() + req.ConnectionMode = &connMode + relaySecs := parseUint32Field(s.iRelayTimeout.Text) + p2pSecs := parseUint32Field(s.iP2pTimeout.Text) + retrySecs := parseUint32Field(s.iP2pRetryMax.Text) + req.RelayTimeoutSeconds = &relaySecs + req.P2PTimeoutSeconds = &p2pSecs + req.P2PRetryMaxSeconds = &retrySecs + return req, nil } @@ -731,10 +818,94 @@ func (s *serviceClient) getNetworkForm() *widget.Form { {Text: "Disable Client Routes", Widget: s.sDisableClientRoutes}, {Text: "Disable Server Routes", Widget: s.sDisableServerRoutes}, {Text: "Disable LAN Access", Widget: s.sBlockLANAccess}, + {Text: "Connection Mode", Widget: s.sConnectionMode}, + {Text: "Relay Timeout (s)", Widget: s.iRelayTimeout}, + {Text: "P2P Timeout (s)", Widget: s.iP2pTimeout}, + {Text: "P2P Retry-Max (s)", Widget: s.iP2pRetryMax}, }, } } +// followServerLabel returns the dropdown text for the "Follow server" +// option. When the engine has received a PeerConfig and the server has +// pushed a mode, we suffix it with "(currently: )" so users see +// what they would inherit by leaving the override on Follow server. +func (s *serviceClient) followServerLabel() string { + if s.serverPushedMode == "" { + return "Follow server" + } + return "Follow server (currently: " + s.serverPushedMode + ")" +} + +// formatTimeoutHint renders the placeholder text for an empty override +// entry, including the actual server-pushed default in seconds when +// available. +func formatTimeoutHint(secs uint32) string { + if secs == 0 { + return "seconds (empty = use server default)" + } + return "seconds (empty = use server default, " + strconv.FormatUint(uint64(secs), 10) + "s)" +} + +// refreshConnectionModeWidgets re-renders the Connection Mode dropdown +// and the timeout entries' placeholder text based on the latest +// server-pushed values. Safe to call multiple times. 
Preserves the +// current selection by canonical-mode string (so "(currently: ...)" +// suffix changes do not lose the user's choice). +func (s *serviceClient) refreshConnectionModeWidgets() { + if s.sConnectionMode == nil { + return + } + prev := s.selectedConnectionMode() + s.sConnectionMode.Options = []string{ + s.followServerLabel(), + "relay-forced", + "p2p", + "p2p-lazy", + "p2p-dynamic", + } + if prev == "" { + s.sConnectionMode.SetSelected(s.followServerLabel()) + } else { + s.sConnectionMode.SetSelected(prev) + } + s.sConnectionMode.Refresh() + + if s.iRelayTimeout != nil { + s.iRelayTimeout.SetPlaceHolder(formatTimeoutHint(s.serverPushedRelayTimeoutSecs)) + } + if s.iP2pTimeout != nil { + s.iP2pTimeout.SetPlaceHolder(formatTimeoutHint(s.serverPushedP2pTimeoutSecs)) + } + if s.iP2pRetryMax != nil { + s.iP2pRetryMax.SetPlaceHolder(formatTimeoutHint(s.serverPushedP2pRetryMaxSecs)) + } +} + +// updateTimeoutEntriesEnabled enables only the timeout fields that are +// meaningful for the currently-selected connection mode. The lazy +// connection manager (and therefore inactivity teardown) only runs in +// p2p-lazy + p2p-dynamic, so other modes get all three fields disabled. 
+func (s *serviceClient) updateTimeoutEntriesEnabled() { + if s.iRelayTimeout == nil { + return + } + switch s.sConnectionMode.Selected { + case "p2p-lazy": + s.iRelayTimeout.Enable() + s.iP2pTimeout.Disable() + s.iP2pRetryMax.Disable() + case "p2p-dynamic": + s.iRelayTimeout.Enable() + s.iP2pTimeout.Enable() + s.iP2pRetryMax.Enable() + default: + s.iRelayTimeout.Disable() + s.iP2pTimeout.Disable() + s.iP2pRetryMax.Disable() + } +} + func (s *serviceClient) getSSHForm() *widget.Form { return &widget.Form{ Items: []*widget.FormItem{ @@ -1042,7 +1213,6 @@ func (s *serviceClient) onTrayReady() { s.mAllowSSH = s.mSettings.AddSubMenuItemCheckbox("Allow SSH", allowSSHMenuDescr, false) s.mAutoConnect = s.mSettings.AddSubMenuItemCheckbox("Connect on Startup", autoConnectMenuDescr, false) s.mEnableRosenpass = s.mSettings.AddSubMenuItemCheckbox("Enable Quantum-Resistance", quantumResistanceMenuDescr, false) - s.mLazyConnEnabled = s.mSettings.AddSubMenuItemCheckbox("Enable Lazy Connections", lazyConnMenuDescr, false) s.mBlockInbound = s.mSettings.AddSubMenuItemCheckbox("Block Inbound Connections", blockInboundMenuDescr, false) s.mNotifications = s.mSettings.AddSubMenuItemCheckbox("Notifications", notificationsMenuDescr, false) s.mSettings.AddSeparator() @@ -1314,6 +1484,14 @@ func (s *serviceClient) getSrvConfig() { cfg = protoConfigToConfig(srvCfg) + // Capture the raw server-pushed values so the UI can show + // "Follow server (currently: )" and the numeric default-hints + // in the override entries. 
+ s.serverPushedMode = srvCfg.GetServerPushedConnectionMode() + s.serverPushedRelayTimeoutSecs = srvCfg.GetServerPushedRelayTimeoutSeconds() + s.serverPushedP2pTimeoutSecs = srvCfg.GetServerPushedP2PTimeoutSeconds() + s.serverPushedP2pRetryMaxSecs = srvCfg.GetServerPushedP2PRetryMaxSeconds() + if cfg.ManagementURL.String() != "" { s.managementURL = cfg.ManagementURL.String() } @@ -1348,6 +1526,11 @@ func (s *serviceClient) getSrvConfig() { s.sshJWTCacheTTL = *cfg.SSHJWTCacheTTL } + s.connectionMode = cfg.ConnectionMode + s.relayTimeoutSecs = cfg.RelayTimeoutSeconds + s.p2pTimeoutSecs = cfg.P2pTimeoutSeconds + s.p2pRetryMaxSecs = cfg.P2pRetryMaxSeconds + if s.showAdvancedSettings { s.iMngURL.SetText(s.managementURL) s.iPreSharedKey.SetText(cfg.PreSharedKey) @@ -1386,6 +1569,33 @@ func (s *serviceClient) getSrvConfig() { if cfg.SSHJWTCacheTTL != nil { s.iSSHJWTCacheTTL.SetText(strconv.Itoa(*cfg.SSHJWTCacheTTL)) } + + // Connection-mode dropdown + timeout entries. Refresh first so + // the "Follow server (currently: ...)" suffix and the numeric + // default-hints reflect what GetConfigResponse just delivered. 
+ s.refreshConnectionModeWidgets() + switch cfg.ConnectionMode { + case "relay-forced", "p2p", "p2p-lazy", "p2p-dynamic": + s.sConnectionMode.SetSelected(cfg.ConnectionMode) + default: + s.sConnectionMode.SetSelected(s.followServerLabel()) + } + if cfg.RelayTimeoutSeconds == 0 { + s.iRelayTimeout.SetText("") + } else { + s.iRelayTimeout.SetText(strconv.FormatUint(uint64(cfg.RelayTimeoutSeconds), 10)) + } + if cfg.P2pTimeoutSeconds == 0 { + s.iP2pTimeout.SetText("") + } else { + s.iP2pTimeout.SetText(strconv.FormatUint(uint64(cfg.P2pTimeoutSeconds), 10)) + } + if cfg.P2pRetryMaxSeconds == 0 { + s.iP2pRetryMax.SetText("") + } else { + s.iP2pRetryMax.SetText(strconv.FormatUint(uint64(cfg.P2pRetryMaxSeconds), 10)) + } + s.updateTimeoutEntriesEnabled() } if s.mNotifications == nil { @@ -1465,6 +1675,12 @@ func protoConfigToConfig(cfg *proto.GetConfigResponse) *profilemanager.Config { ttl := int(cfg.SshJWTCacheTTL) config.SSHJWTCacheTTL = &ttl + // Phase 1+ ConnectionMode override + per-mode timeouts. 
+ config.ConnectionMode = cfg.ConnectionMode + config.RelayTimeoutSeconds = cfg.RelayTimeoutSeconds + config.P2pTimeoutSeconds = cfg.P2PTimeoutSeconds + config.P2pRetryMaxSeconds = cfg.P2PRetryMaxSeconds + return &config } @@ -1551,12 +1767,6 @@ func (s *serviceClient) loadSettings() { s.mEnableRosenpass.Uncheck() } - if cfg.LazyConnectionEnabled { - s.mLazyConnEnabled.Check() - } else { - s.mLazyConnEnabled.Uncheck() - } - if cfg.BlockInbound { s.mBlockInbound.Check() } else { @@ -1579,7 +1789,6 @@ func (s *serviceClient) updateConfig() error { disableAutoStart := !s.mAutoConnect.Checked() sshAllowed := s.mAllowSSH.Checked() rosenpassEnabled := s.mEnableRosenpass.Checked() - lazyConnectionEnabled := s.mLazyConnEnabled.Checked() blockInbound := s.mBlockInbound.Checked() notificationsDisabled := !s.mNotifications.Checked() @@ -1602,14 +1811,13 @@ func (s *serviceClient) updateConfig() error { } req := proto.SetConfigRequest{ - ProfileName: activeProf.Name, - Username: currUser.Username, - DisableAutoConnect: &disableAutoStart, - ServerSSHAllowed: &sshAllowed, - RosenpassEnabled: &rosenpassEnabled, - LazyConnectionEnabled: &lazyConnectionEnabled, - BlockInbound: &blockInbound, - DisableNotifications: ¬ificationsDisabled, + ProfileName: activeProf.Name, + Username: currUser.Username, + DisableAutoConnect: &disableAutoStart, + ServerSSHAllowed: &sshAllowed, + RosenpassEnabled: &rosenpassEnabled, + BlockInbound: &blockInbound, + DisableNotifications: ¬ificationsDisabled, } if _, err := conn.SetConfig(s.ctx, &req); err != nil { diff --git a/client/ui/const.go b/client/ui/const.go index 48619be752c..ce7a9a29421 100644 --- a/client/ui/const.go +++ b/client/ui/const.go @@ -4,7 +4,6 @@ const ( allowSSHMenuDescr = "Allow SSH connections" autoConnectMenuDescr = "Connect automatically when the service starts" quantumResistanceMenuDescr = "Enable post-quantum security via Rosenpass" - lazyConnMenuDescr = "[Experimental] Enable lazy connections" blockInboundMenuDescr = "Block 
inbound connections to the local machine and routed networks" notificationsMenuDescr = "Enable notifications" advancedSettingsMenuDescr = "Advanced settings of the application" diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 876fcef5fd8..90208230867 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -43,8 +43,6 @@ func (h *eventHandler) listen(ctx context.Context) { h.handleAutoConnectClick() case <-h.client.mEnableRosenpass.ClickedCh: h.handleRosenpassClick() - case <-h.client.mLazyConnEnabled.ClickedCh: - h.handleLazyConnectionClick() case <-h.client.mBlockInbound.ClickedCh: h.handleBlockInboundClick() case <-h.client.mAdvancedSettings.ClickedCh: @@ -152,15 +150,6 @@ func (h *eventHandler) handleRosenpassClick() { } } -func (h *eventHandler) handleLazyConnectionClick() { - h.toggleCheckbox(h.client.mLazyConnEnabled) - if err := h.updateConfigWithErr(); err != nil { - h.toggleCheckbox(h.client.mLazyConnEnabled) // revert checkbox state on error - log.Errorf("failed to update config: %v", err) - h.client.notifier.Send("Error", "Failed to update lazy connection settings") - } -} - func (h *eventHandler) handleBlockInboundClick() { h.toggleCheckbox(h.client.mBlockInbound) if err := h.updateConfigWithErr(); err != nil { diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index ef417d3cfb5..1f0e8fb58c7 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -9,6 +9,7 @@ import ( log "github.com/sirupsen/logrus" integrationsConfig "github.com/netbirdio/management-integrations/integrations/config" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" @@ -22,6 +23,11 @@ import ( "github.com/netbirdio/netbird/shared/sshauth" ) +// p2pRetryMaxDisabledSentinel is the wire-format value that signals +// "user-explicit 
disable backoff" (uint32-max). The 0 wire-value is +// reserved for "not set, use daemon default". Phase 3 of #5989. +const p2pRetryMaxDisabledSentinel = ^uint32(0) + func toNetbirdConfig(config *nbconfig.Config, turnCredentials *Token, relayToken *Token, extraSettings *types.ExtraSettings) *proto.NetbirdConfig { if config == nil { return nil @@ -100,12 +106,49 @@ func toPeerConfig(peer *nbpeer.Peer, network *types.Network, dnsName string, set sshConfig.JwtConfig = buildJWTConfig(httpConfig, deviceFlowConfig) } + // Resolve the effective ConnectionMode for this peer. + // Phase 1: account-wide settings only (per-peer / per-group resolution + // follows in Phase 3 / issue #5990). The new ConnectionMode field wins + // over the legacy LazyConnectionEnabled boolean. UNSPECIFIED in Settings + // (i.e. ConnectionMode == nil) falls back to the legacy bool. + resolvedMode := connectionmode.ResolveLegacyLazyBool(settings.LazyConnectionEnabled) + if settings.ConnectionMode != nil { + if m, err := connectionmode.ParseString(*settings.ConnectionMode); err == nil && m != connectionmode.ModeUnspecified { + resolvedMode = m + } + } + + relayTO := uint32(0) + if settings.RelayTimeoutSeconds != nil { + relayTO = *settings.RelayTimeoutSeconds + } + p2pTO := uint32(0) + if settings.P2pTimeoutSeconds != nil { + p2pTO = *settings.P2pTimeoutSeconds + } + p2pRetryMax := uint32(0) + if settings.P2pRetryMaxSeconds != nil { + if *settings.P2pRetryMaxSeconds == 0 { + p2pRetryMax = p2pRetryMaxDisabledSentinel + } else { + p2pRetryMax = *settings.P2pRetryMaxSeconds + } + } + return &proto.PeerConfig{ Address: fmt.Sprintf("%s/%d", peer.IP.String(), netmask), SshConfig: sshConfig, Fqdn: fqdn, RoutingPeerDnsResolutionEnabled: settings.RoutingPeerDNSResolutionEnabled, - LazyConnectionEnabled: settings.LazyConnectionEnabled, + // Send BOTH the new enum (for new clients) and the legacy boolean + // (for old clients). 
New clients prefer the explicit enum and + // ignore the bool; old clients ignore the unknown enum field + // (proto3 default behaviour) and fall back to the bool. + LazyConnectionEnabled: resolvedMode.ToLazyConnectionEnabled(), + ConnectionMode: resolvedMode.ToProto(), + P2PTimeoutSeconds: p2pTO, + P2PRetryMaxSeconds: p2pRetryMax, + RelayTimeoutSeconds: relayTO, AutoUpdate: &proto.AutoUpdateSettings{ Version: settings.AutoUpdateVersion, AlwaysUpdate: settings.AutoUpdateAlways, diff --git a/management/internals/shared/grpc/conversion_test.go b/management/internals/shared/grpc/conversion_test.go index 1e75caf959a..961bea0210e 100644 --- a/management/internals/shared/grpc/conversion_test.go +++ b/management/internals/shared/grpc/conversion_test.go @@ -2,6 +2,7 @@ package grpc import ( "fmt" + "net" "net/netip" "reflect" "testing" @@ -12,8 +13,172 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller/cache" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/types" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) +// TestToPeerConfig_ConnectionModeResolution covers Phase 1 of issue #5989: +// the management server resolves the effective ConnectionMode from +// Settings (with the new ConnectionMode field winning over the legacy +// LazyConnectionEnabled boolean), then writes BOTH wire fields so old +// clients (boolean only) and new clients (enum only) see consistent +// behaviour. 
+func TestToPeerConfig_ConnectionModeResolution(t *testing.T) { + cases := []struct { + name string + settingsMode *string + settingsLazyBool bool + settingsRelayTO *uint32 + settingsP2pTO *uint32 + wantPCMode mgmProto.ConnectionMode + wantPCLazyBool bool + wantPCRelayTO uint32 + wantPCP2pTO uint32 + }{ + { + name: "no settings -> P2P + lazy=false", + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + wantPCLazyBool: false, + }, + { + name: "only legacy lazy=true -> P2P_LAZY + lazy=true", + settingsLazyBool: true, + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + { + name: "ConnectionMode=p2p-lazy explicit -> P2P_LAZY + lazy=true", + settingsMode: strPtrTest("p2p-lazy"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + { + name: "ConnectionMode=p2p explicit -> P2P + lazy=false", + settingsMode: strPtrTest("p2p"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + wantPCLazyBool: false, + }, + { + name: "ConnectionMode=relay-forced -> RELAY_FORCED + lazy=false (structural compat gap)", + settingsMode: strPtrTest("relay-forced"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + wantPCLazyBool: false, + }, + { + name: "ConnectionMode wins over conflicting legacy bool", + settingsMode: strPtrTest("relay-forced"), + settingsLazyBool: true, // ignored + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + wantPCLazyBool: false, + }, + { + name: "RelayTimeout propagates", + settingsMode: strPtrTest("p2p-lazy"), + settingsRelayTO: u32PtrTest(42), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + wantPCRelayTO: 42, + }, + { + name: "P2pTimeout propagates", + settingsMode: strPtrTest("p2p-dynamic"), + settingsP2pTO: u32PtrTest(180), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + wantPCLazyBool: false, // p2p-dynamic maps to lazy=false (best-match for old clients) + 
wantPCP2pTO: 180, + }, + { + name: "Garbage in ConnectionMode falls back to legacy bool", + settingsMode: strPtrTest("not-a-mode"), + settingsLazyBool: true, + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + } + + // Minimal Network and Peer fixtures shared across cases. + _, ipnet, _ := net.ParseCIDR("10.0.0.0/16") + network := &types.Network{Net: *ipnet} + peer := &nbpeer.Peer{ + ID: "p1", + Name: "test-peer", + DNSLabel: "test-peer", + IP: net.IPv4(10, 0, 0, 5), + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + settings := &types.Settings{ + LazyConnectionEnabled: c.settingsLazyBool, + ConnectionMode: c.settingsMode, + RelayTimeoutSeconds: c.settingsRelayTO, + P2pTimeoutSeconds: c.settingsP2pTO, + } + pc := toPeerConfig(peer, network, "example.local", settings, nil, nil, false) + + assert.Equal(t, c.wantPCMode, pc.GetConnectionMode(), + "ConnectionMode wire field") + assert.Equal(t, c.wantPCLazyBool, pc.GetLazyConnectionEnabled(), + "LazyConnectionEnabled wire field (backwards-compat)") + assert.Equal(t, c.wantPCRelayTO, pc.GetRelayTimeoutSeconds(), + "RelayTimeoutSeconds wire field") + assert.Equal(t, c.wantPCP2pTO, pc.GetP2PTimeoutSeconds(), + "P2PTimeoutSeconds wire field") + }) + } +} + +func strPtrTest(s string) *string { return &s } +func u32PtrTest(v uint32) *uint32 { return &v } + +// toPeerConfigForTest is a minimal helper that calls toPeerConfig with a +// fixed peer and network fixture, forwarding only the settings argument. +// Used by the P2pRetryMaxSeconds sentinel tests (Phase 3 / #5989). 
+func toPeerConfigForTest(settings *types.Settings) *mgmProto.PeerConfig { + _, ipnet, _ := net.ParseCIDR("10.0.0.0/16") + network := &types.Network{Net: *ipnet} + peer := &nbpeer.Peer{ + ID: "p1", + Name: "test-peer", + DNSLabel: "test-peer", + IP: net.IPv4(10, 0, 0, 5), + } + return toPeerConfig(peer, network, "example.local", settings, nil, nil, false) +} + +func TestToPeerConfig_P2pRetryMax_NullDB(t *testing.T) { + settings := &types.Settings{ + P2pRetryMaxSeconds: nil, // DB has NULL + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != 0 { + t.Errorf("NULL in DB should produce 0 on the wire (= use daemon default), got %d", pc.P2PRetryMaxSeconds) + } +} + +func TestToPeerConfig_P2pRetryMax_ExplicitDisable(t *testing.T) { + zero := uint32(0) + settings := &types.Settings{ + P2pRetryMaxSeconds: &zero, // user explicitly set 0 + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != ^uint32(0) { + t.Errorf("explicit 0 should map to uint32-max sentinel on the wire, got %d", pc.P2PRetryMaxSeconds) + } +} + +func TestToPeerConfig_P2pRetryMax_NormalValue(t *testing.T) { + v := uint32(600) + settings := &types.Settings{ + P2pRetryMaxSeconds: &v, + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != 600 { + t.Errorf("expected 600 on the wire, got %d", pc.P2PRetryMaxSeconds) + } +} + func TestToProtocolDNSConfigWithCache(t *testing.T) { var cache cache.DNSConfigCache diff --git a/management/server/account.go b/management/server/account.go index 4b71ab486eb..7ba5e709708 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -371,6 +371,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco am.handleRoutingPeerDNSResolutionSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleLazyConnectionSettings(ctx, oldSettings, newSettings, userID, accountID) + am.handleConnectionModeSettings(ctx, oldSettings, newSettings, userID, accountID) 
am.handlePeerLoginExpirationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleGroupsPropagationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleAutoUpdateVersionSettings(ctx, oldSettings, newSettings, userID, accountID) @@ -455,6 +456,72 @@ func (am *DefaultAccountManager) handleLazyConnectionSettings(ctx context.Contex } } +// handleConnectionModeSettings emits one audit event per changed Phase-1 +// connection-mode setting (mode, relay timeout, p2p timeout). Each event +// carries old/new values in the meta payload so administrators can audit +// the full transition. NULL transitions show as empty string / 0 in the +// meta — chosen over a sentinel so the frontend can render uniformly. +func (am *DefaultAccountManager) handleConnectionModeSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { + if !equalStringPtr(oldSettings.ConnectionMode, newSettings.ConnectionMode) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountConnectionModeChanged, map[string]any{ + "old": derefStringPtr(oldSettings.ConnectionMode), + "new": derefStringPtr(newSettings.ConnectionMode), + }) + } + if !equalUint32Ptr(oldSettings.RelayTimeoutSeconds, newSettings.RelayTimeoutSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountRelayTimeoutChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.RelayTimeoutSeconds), + "new": derefUint32Ptr(newSettings.RelayTimeoutSeconds), + }) + } + if !equalUint32Ptr(oldSettings.P2pTimeoutSeconds, newSettings.P2pTimeoutSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountP2pTimeoutChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.P2pTimeoutSeconds), + "new": derefUint32Ptr(newSettings.P2pTimeoutSeconds), + }) + } + if !equalUint32Ptr(oldSettings.P2pRetryMaxSeconds, newSettings.P2pRetryMaxSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountP2pRetryMaxChanged, 
map[string]any{ + "old": derefUint32Ptr(oldSettings.P2pRetryMaxSeconds), + "new": derefUint32Ptr(newSettings.P2pRetryMaxSeconds), + }) + } +} + +func equalStringPtr(a, b *string) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func equalUint32Ptr(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func derefStringPtr(p *string) string { + if p == nil { + return "" + } + return *p +} + +func derefUint32Ptr(p *uint32) uint32 { + if p == nil { + return 0 + } + return *p +} + func (am *DefaultAccountManager) handlePeerLoginExpirationSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { if oldSettings.PeerLoginExpirationEnabled != newSettings.PeerLoginExpirationEnabled { event := activity.AccountPeerLoginExpirationEnabled diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index ddc3e00c38d..8b09a74b182 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -232,6 +232,19 @@ const ( // DomainValidated indicates that a custom domain was validated DomainValidated Activity = 120 + // AccountConnectionModeChanged indicates the account-wide ConnectionMode + // setting was changed (Phase 1 of issue #5989). + AccountConnectionModeChanged Activity = 121 + // AccountRelayTimeoutChanged indicates the account-wide RelayTimeoutSeconds + // setting was changed. + AccountRelayTimeoutChanged Activity = 122 + // AccountP2pTimeoutChanged indicates the account-wide P2pTimeoutSeconds + // setting was changed. + AccountP2pTimeoutChanged Activity = 123 + // AccountP2pRetryMaxChanged indicates the account-wide P2pRetryMaxSeconds + // setting was modified (Phase 3 of #5989). 
+ AccountP2pRetryMaxChanged Activity = 124 + AccountDeleted Activity = 99999 ) @@ -335,6 +348,11 @@ var activityMap = map[Activity]Code{ AccountLazyConnectionEnabled: {"Account lazy connection enabled", "account.setting.lazy.connection.enable"}, AccountLazyConnectionDisabled: {"Account lazy connection disabled", "account.setting.lazy.connection.disable"}, + AccountConnectionModeChanged: {"Account connection mode changed", "account.setting.connection_mode.change"}, + AccountRelayTimeoutChanged: {"Account relay timeout changed", "account.setting.relay_timeout.change"}, + AccountP2pTimeoutChanged: {"Account p2p timeout changed", "account.setting.p2p_timeout.change"}, + AccountP2pRetryMaxChanged: {"Account p2p retry max changed", "account.setting.p2p_retry_max.change"}, + AccountNetworkRangeUpdated: {"Account network range updated", "account.network.range.update"}, PeerIPUpdated: {"Peer IP updated", "peer.ip.update"}, diff --git a/management/server/http/handlers/accounts/accounts_handler.go b/management/server/http/handlers/accounts/accounts_handler.go index cc5567e3db6..f4c512ffb5e 100644 --- a/management/server/http/handlers/accounts/accounts_handler.go +++ b/management/server/http/handlers/accounts/accounts_handler.go @@ -215,6 +215,29 @@ func (h *handler) updateAccountRequestSettings(req api.PutApiAccountsAccountIdJS if req.Settings.LazyConnectionEnabled != nil { returnSettings.LazyConnectionEnabled = *req.Settings.LazyConnectionEnabled } + if req.Settings.ConnectionMode != nil { + modeStr := string(*req.Settings.ConnectionMode) + if !req.Settings.ConnectionMode.Valid() { + return nil, fmt.Errorf("invalid connection_mode %q", modeStr) + } + // Persist as the canonical string. Clients clear an override by + // sending JSON null (which lands here as a nil pointer and skips + // this whole block, leaving the existing value untouched). 
+ s := modeStr + returnSettings.ConnectionMode = &s + } + if req.Settings.P2pTimeoutSeconds != nil { + v := uint32(*req.Settings.P2pTimeoutSeconds) + returnSettings.P2pTimeoutSeconds = &v + } + if req.Settings.P2pRetryMaxSeconds != nil { + v := uint32(*req.Settings.P2pRetryMaxSeconds) + returnSettings.P2pRetryMaxSeconds = &v + } + if req.Settings.RelayTimeoutSeconds != nil { + v := uint32(*req.Settings.RelayTimeoutSeconds) + returnSettings.RelayTimeoutSeconds = &v + } if req.Settings.AutoUpdateVersion != nil { _, err := goversion.NewSemver(*req.Settings.AutoUpdateVersion) if *req.Settings.AutoUpdateVersion == autoUpdateLatestVersion || @@ -349,6 +372,34 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A PeerExposeEnabled: settings.PeerExposeEnabled, PeerExposeGroups: settings.PeerExposeGroups, LazyConnectionEnabled: &settings.LazyConnectionEnabled, + ConnectionMode: func() *api.AccountSettingsConnectionMode { + if settings.ConnectionMode == nil { + return nil + } + v := api.AccountSettingsConnectionMode(*settings.ConnectionMode) + return &v + }(), + P2pTimeoutSeconds: func() *int64 { + if settings.P2pTimeoutSeconds == nil { + return nil + } + v := int64(*settings.P2pTimeoutSeconds) + return &v + }(), + P2pRetryMaxSeconds: func() *int64 { + if settings.P2pRetryMaxSeconds == nil { + return nil + } + v := int64(*settings.P2pRetryMaxSeconds) + return &v + }(), + RelayTimeoutSeconds: func() *int64 { + if settings.RelayTimeoutSeconds == nil { + return nil + } + v := int64(*settings.RelayTimeoutSeconds) + return &v + }(), DnsDomain: &settings.DNSDomain, AutoUpdateVersion: &settings.AutoUpdateVersion, AutoUpdateAlways: &settings.AutoUpdateAlways, diff --git a/management/server/http/handlers/accounts/accounts_handler_test.go b/management/server/http/handlers/accounts/accounts_handler_test.go index 739dfe2f655..fc61ada712b 100644 --- a/management/server/http/handlers/accounts/accounts_handler_test.go +++ 
b/management/server/http/handlers/accounts/accounts_handler_test.go @@ -336,3 +336,79 @@ func TestAccounts_AccountsHandler(t *testing.T) { }) } } + +func TestAccountsHandler_PutSettings_P2pRetryMax(t *testing.T) { + accountID := "test_account" + adminUser := types.NewAdminUser("test_user") + + sr := func(v string) *string { return &v } + br := func(v bool) *bool { return &v } + ir := func(v int64) *int64 { return &v } + + handler := initAccountsTestData(t, &types.Account{ + Id: accountID, + Domain: "hotmail.com", + Network: types.NewNetwork(), + Users: map[string]*types.User{ + adminUser.Id: adminUser, + }, + Settings: &types.Settings{ + PeerLoginExpirationEnabled: false, + PeerLoginExpiration: time.Hour, + RegularUsersViewBlocked: false, + }, + }) + + recorder := httptest.NewRecorder() + req := httptest.NewRequest( + http.MethodPut, + "/api/accounts/"+accountID, + bytes.NewBufferString(`{"settings": {"peer_login_expiration": 3600, "peer_login_expiration_enabled": false, "p2p_retry_max_seconds": 600}, "onboarding": {"onboarding_flow_pending": true, "signup_form_pending": true}}`), + ) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: adminUser.Id, + AccountId: accountID, + Domain: "hotmail.com", + }) + + router := mux.NewRouter() + router.HandleFunc("/api/accounts/{accountId}", handler.updateAccount).Methods("PUT") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + if status := recorder.Code; status != http.StatusOK { + t.Fatalf("handler returned wrong status code: got %v want %v", status, http.StatusOK) + } + + content, err := io.ReadAll(res.Body) + if err != nil { + t.Fatalf("could not read response body: %v", err) + } + + var actual api.Account + if err = json.Unmarshal(content, &actual); err != nil { + t.Fatalf("response is not valid JSON: %v", err) + } + + expectedSettings := api.AccountSettings{ + PeerLoginExpiration: 3600, + PeerLoginExpirationEnabled: false, + GroupsPropagationEnabled: br(false), + 
JwtGroupsClaimName: sr(""), + JwtGroupsEnabled: br(false), + JwtAllowGroups: &[]string{}, + RegularUsersViewBlocked: false, + RoutingPeerDnsResolutionEnabled: br(false), + LazyConnectionEnabled: br(false), + DnsDomain: sr(""), + AutoUpdateAlways: br(false), + AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), + P2pRetryMaxSeconds: ir(600), + } + + assert.Equal(t, expectedSettings, actual.Settings) +} diff --git a/management/server/types/settings.go b/management/server/types/settings.go index 4ea79ec72fc..19e5085c1b8 100644 --- a/management/server/types/settings.go +++ b/management/server/types/settings.go @@ -58,6 +58,27 @@ type Settings struct { // LazyConnectionEnabled indicates if the experimental feature is enabled or disabled LazyConnectionEnabled bool `gorm:"default:false"` + // ConnectionMode is the account-wide default connection mode (Phase 1 + // of issue #5989). Nullable: NULL means "fall back to LazyConnectionEnabled". + // Stored as the canonical lower-kebab-case string (e.g. "p2p-lazy"). + ConnectionMode *string `gorm:"type:varchar(32);default:null"` + + // RelayTimeoutSeconds, when non-NULL, overrides the built-in default + // (5 min). 0 = "never tear down". Nullable to distinguish "use default" + // from "explicit 0". + RelayTimeoutSeconds *uint32 `gorm:"default:null"` + + // P2pTimeoutSeconds is reserved for Phase 2; same nullable semantics. + // Built-in default in Phase 1: 180 min, but not yet effective. + P2pTimeoutSeconds *uint32 `gorm:"default:null"` + + // P2pRetryMaxSeconds is reserved for Phase 3 (#5989). Caps the ICE- + // failure backoff sequence in p2p-dynamic mode. NULL = use daemon's + // built-in default (900s = 15 min). 0 = disable backoff (treated + // internally as "user-explicit-disable" via uint32-max sentinel on + // the wire). 
+ P2pRetryMaxSeconds *uint32 `gorm:"default:null"` + // AutoUpdateVersion client auto-update version AutoUpdateVersion string `gorm:"default:'disabled'"` @@ -92,6 +113,10 @@ func (s *Settings) Copy() *Settings { PeerExposeEnabled: s.PeerExposeEnabled, PeerExposeGroups: slices.Clone(s.PeerExposeGroups), LazyConnectionEnabled: s.LazyConnectionEnabled, + ConnectionMode: cloneStringPtr(s.ConnectionMode), + RelayTimeoutSeconds: cloneUint32Ptr(s.RelayTimeoutSeconds), + P2pTimeoutSeconds: cloneUint32Ptr(s.P2pTimeoutSeconds), + P2pRetryMaxSeconds: cloneUint32Ptr(s.P2pRetryMaxSeconds), DNSDomain: s.DNSDomain, NetworkRange: s.NetworkRange, AutoUpdateVersion: s.AutoUpdateVersion, @@ -138,3 +163,23 @@ func (e *ExtraSettings) Copy() *ExtraSettings { FlowDnsCollectionEnabled: e.FlowDnsCollectionEnabled, } } + +// cloneStringPtr returns a deep copy of a *string (nil-safe). Used by +// Settings.Copy for the new nullable ConnectionMode field. +func cloneStringPtr(p *string) *string { + if p == nil { + return nil + } + v := *p + return &v +} + +// cloneUint32Ptr returns a deep copy of a *uint32 (nil-safe). Used by +// Settings.Copy for the new nullable timeout fields. 
+func cloneUint32Ptr(p *uint32) *uint32 { + if p == nil { + return nil + } + v := *p + return &v +} diff --git a/management/server/types/settings_test.go b/management/server/types/settings_test.go new file mode 100644 index 00000000000..b6a42f6c6ba --- /dev/null +++ b/management/server/types/settings_test.go @@ -0,0 +1,20 @@ +package types + +import "testing" + +func TestSettings_Copy_P2pRetryMaxSeconds(t *testing.T) { + v := uint32(900) + src := &Settings{P2pRetryMaxSeconds: &v} + dst := src.Copy() + if dst.P2pRetryMaxSeconds == nil { + t.Fatal("Copy lost P2pRetryMaxSeconds pointer") + } + if *dst.P2pRetryMaxSeconds != 900 { + t.Fatalf("expected 900, got %d", *dst.P2pRetryMaxSeconds) + } + // Verify it's a deep copy (different pointers) + *dst.P2pRetryMaxSeconds = 600 + if *src.P2pRetryMaxSeconds != 900 { + t.Fatal("Copy did not deep-clone P2pRetryMaxSeconds") + } +} diff --git a/shared/connectionmode/mode.go b/shared/connectionmode/mode.go new file mode 100644 index 00000000000..d3b1c9e14e4 --- /dev/null +++ b/shared/connectionmode/mode.go @@ -0,0 +1,128 @@ +// Package connectionmode defines the Mode type used to control how a peer +// establishes connections to other peers. Introduced in Phase 1 of the +// connection-mode consolidation (issue #5989) to replace the historical +// pair (NB_FORCE_RELAY, NB_ENABLE_EXPERIMENTAL_LAZY_CONN). +package connectionmode + +import ( + "fmt" + "strings" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Mode is a connection mode for peer-to-peer (or relay-only) connections. +// ModeUnspecified is the zero value and indicates "fall back to the next +// resolution source" (env -> config -> server-pushed -> legacy bool). 
+type Mode int + +const ( + ModeUnspecified Mode = iota + ModeRelayForced + ModeP2P + ModeP2PLazy + ModeP2PDynamic + // ModeFollowServer is a client-side sentinel: setting this in the + // client config explicitly clears any local override so the + // server-pushed value (or its legacy fallback) is used. It MUST NOT + // be sent on the wire -- ToProto returns UNSPECIFIED for it. + ModeFollowServer +) + +// String returns the canonical lower-kebab-case name of the mode. +func (m Mode) String() string { + switch m { + case ModeRelayForced: + return "relay-forced" + case ModeP2P: + return "p2p" + case ModeP2PLazy: + return "p2p-lazy" + case ModeP2PDynamic: + return "p2p-dynamic" + case ModeFollowServer: + return "follow-server" + default: + return "" + } +} + +// ParseString accepts the canonical name (case-insensitive, surrounding +// whitespace tolerated) and returns the corresponding Mode. Empty input +// returns ModeUnspecified with no error. Unknown input returns +// ModeUnspecified with an error. +func ParseString(s string) (Mode, error) { + switch strings.ToLower(strings.TrimSpace(s)) { + case "": + return ModeUnspecified, nil + case "relay-forced": + return ModeRelayForced, nil + case "p2p": + return ModeP2P, nil + case "p2p-lazy": + return ModeP2PLazy, nil + case "p2p-dynamic": + return ModeP2PDynamic, nil + case "follow-server": + return ModeFollowServer, nil + default: + return ModeUnspecified, fmt.Errorf("unknown connection mode %q", s) + } +} + +// FromProto translates a proto enum value to the internal Mode. 
+func FromProto(m mgmProto.ConnectionMode) Mode { + switch m { + case mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED: + return ModeRelayForced + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P: + return ModeP2P + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY: + return ModeP2PLazy + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC: + return ModeP2PDynamic + default: + return ModeUnspecified + } +} + +// ToProto translates the internal Mode to a proto enum value. +// ModeFollowServer is a client-side concept and intentionally maps to +// UNSPECIFIED so it never appears on the wire. +func (m Mode) ToProto() mgmProto.ConnectionMode { + switch m { + case ModeRelayForced: + return mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED + case ModeP2P: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P + case ModeP2PLazy: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY + case ModeP2PDynamic: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC + default: + return mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED + } +} + +// ResolveLegacyLazyBool maps the historical Settings.LazyConnectionEnabled +// boolean to the new Mode. Used when a new client receives an old server's +// PeerConfig (ConnectionMode = UNSPECIFIED) or when the management server +// has no explicit Settings.ConnectionMode set yet. +func ResolveLegacyLazyBool(lazy bool) Mode { + if lazy { + return ModeP2PLazy + } + return ModeP2P +} + +// ToLazyConnectionEnabled is the inverse mapping for backwards-compat. +// Used by toPeerConfig() so old clients (which only know the boolean) +// still get a sensible behaviour. +// +// Note: ModeRelayForced cannot be expressed via the legacy boolean and +// falls back to false. This is a structural compat gap documented in the +// release notes; admins must set NB_FORCE_RELAY=true on old clients +// or upgrade them. 
+func (m Mode) ToLazyConnectionEnabled() bool { + return m == ModeP2PLazy +} diff --git a/shared/connectionmode/mode_test.go b/shared/connectionmode/mode_test.go new file mode 100644 index 00000000000..01a9c11c929 --- /dev/null +++ b/shared/connectionmode/mode_test.go @@ -0,0 +1,106 @@ +package connectionmode + +import ( + "testing" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestParseString(t *testing.T) { + cases := []struct { + input string + want Mode + wantErr bool + }{ + {"relay-forced", ModeRelayForced, false}, + {"p2p", ModeP2P, false}, + {"p2p-lazy", ModeP2PLazy, false}, + {"p2p-dynamic", ModeP2PDynamic, false}, + {"follow-server", ModeFollowServer, false}, + {"", ModeUnspecified, false}, + {"P2P", ModeP2P, false}, + {" p2p-lazy ", ModeP2PLazy, false}, + {"junk", ModeUnspecified, true}, + } + for _, c := range cases { + got, err := ParseString(c.input) + if (err != nil) != c.wantErr { + t.Errorf("ParseString(%q): err=%v wantErr=%v", c.input, err, c.wantErr) + continue + } + if got != c.want { + t.Errorf("ParseString(%q) = %v, want %v", c.input, got, c.want) + } + } +} + +func TestFromProto(t *testing.T) { + cases := []struct { + input mgmProto.ConnectionMode + want Mode + }{ + {mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED, ModeUnspecified}, + {mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, ModeRelayForced}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P, ModeP2P}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, ModeP2PLazy}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, ModeP2PDynamic}, + } + for _, c := range cases { + got := FromProto(c.input) + if got != c.want { + t.Errorf("FromProto(%v) = %v, want %v", c.input, got, c.want) + } + } +} + +func TestToProto(t *testing.T) { + for _, m := range []Mode{ModeUnspecified, ModeRelayForced, ModeP2P, ModeP2PLazy, ModeP2PDynamic} { + got := FromProto(m.ToProto()) + if got != m { + t.Errorf("round-trip Mode %v -> proto -> Mode = %v", m, got) + } + } + 
if got := ModeFollowServer.ToProto(); got != mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED { + t.Errorf("ModeFollowServer.ToProto() = %v, want UNSPECIFIED", got) + } +} + +func TestResolveLegacyLazyBool(t *testing.T) { + if got := ResolveLegacyLazyBool(true); got != ModeP2PLazy { + t.Errorf("ResolveLegacyLazyBool(true) = %v, want ModeP2PLazy", got) + } + if got := ResolveLegacyLazyBool(false); got != ModeP2P { + t.Errorf("ResolveLegacyLazyBool(false) = %v, want ModeP2P", got) + } +} + +func TestToLazyConnectionEnabled(t *testing.T) { + cases := []struct { + mode Mode + want bool + }{ + {ModeRelayForced, false}, + {ModeP2P, false}, + {ModeP2PLazy, true}, + {ModeP2PDynamic, false}, + {ModeUnspecified, false}, + } + for _, c := range cases { + got := c.mode.ToLazyConnectionEnabled() + if got != c.want { + t.Errorf("Mode %v ToLazyConnectionEnabled() = %v, want %v", c.mode, got, c.want) + } + } +} + +func TestStringRoundTrip(t *testing.T) { + for _, m := range []Mode{ModeRelayForced, ModeP2P, ModeP2PLazy, ModeP2PDynamic, ModeFollowServer} { + got, err := ParseString(m.String()) + if err != nil { + t.Errorf("round-trip parse of %v.String() failed: %v", m, err) + } + if got != m { + t.Errorf("round-trip %v -> %q -> %v", m, m.String(), got) + } + } +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 327e2061425..c0ea938ea5f 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -359,6 +359,50 @@ components: description: Enables or disables experimental lazy connection type: boolean example: true + connection_mode: + x-experimental: true + type: string + enum: [relay-forced, p2p, p2p-lazy, p2p-dynamic] + nullable: true + description: | + Account-wide default peer-connection mode. NULL means + "fall back to lazy_connection_enabled" for backwards compatibility. + Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are + functional. 
p2p-dynamic is reserved (passes through as p2p in + Phase 1; will become functional in Phase 2). + p2p_timeout_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Default ICE-worker idle timeout in seconds. 0 = never tear down. + Effective only in p2p-dynamic mode (added in Phase 2). + NULL means "use built-in default" (180 minutes). + p2p_retry_max_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Maximum interval between P2P retry attempts after consecutive + ICE failures, in seconds. Default 900 (= 15 min). Set to 0 to + disable backoff (always retry immediately, Phase-2 behavior). + Effective only in p2p-dynamic mode (added in Phase 3). + example: 900 + relay_timeout_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Default relay-worker idle timeout in seconds. 0 = never tear + down. Effective in p2p-lazy and p2p-dynamic modes. Backwards- + compat alias for NB_LAZY_CONN_INACTIVITY_THRESHOLD on the + client. NULL means "use built-in default" (5 minutes). auto_update_version: description: Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") type: string diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index dc916f81ac9..83d1ffef827 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1,6 +1,6 @@ // Package api provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.6.0 DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.7.0 DO NOT EDIT. 
package api import ( @@ -13,8 +13,8 @@ import ( ) const ( - BearerAuthScopes = "BearerAuth.Scopes" - TokenAuthScopes = "TokenAuth.Scopes" + BearerAuthScopes bearerAuthContextKey = "BearerAuth.Scopes" + TokenAuthScopes tokenAuthContextKey = "TokenAuth.Scopes" ) // Defines values for AccessRestrictionsCrowdsecMode. @@ -38,6 +38,30 @@ func (e AccessRestrictionsCrowdsecMode) Valid() bool { } } +// Defines values for AccountSettingsConnectionMode. +const ( + AccountSettingsConnectionModeP2p AccountSettingsConnectionMode = "p2p" + AccountSettingsConnectionModeP2pDynamic AccountSettingsConnectionMode = "p2p-dynamic" + AccountSettingsConnectionModeP2pLazy AccountSettingsConnectionMode = "p2p-lazy" + AccountSettingsConnectionModeRelayForced AccountSettingsConnectionMode = "relay-forced" +) + +// Valid indicates whether the value is a known member of the AccountSettingsConnectionMode enum. +func (e AccountSettingsConnectionMode) Valid() bool { + switch e { + case AccountSettingsConnectionModeP2p: + return true + case AccountSettingsConnectionModeP2pDynamic: + return true + case AccountSettingsConnectionModeP2pLazy: + return true + case AccountSettingsConnectionModeRelayForced: + return true + default: + return false + } +} + // Defines values for CreateAzureIntegrationRequestHost. const ( CreateAzureIntegrationRequestHostMicrosoftCom CreateAzureIntegrationRequestHost = "microsoft.com" @@ -511,6 +535,7 @@ func (e GroupMinimumIssued) Valid() bool { // Defines values for IdentityProviderType. 
const ( + IdentityProviderTypeAdfs IdentityProviderType = "adfs" IdentityProviderTypeEntra IdentityProviderType = "entra" IdentityProviderTypeGoogle IdentityProviderType = "google" IdentityProviderTypeMicrosoft IdentityProviderType = "microsoft" @@ -518,12 +543,13 @@ const ( IdentityProviderTypeOkta IdentityProviderType = "okta" IdentityProviderTypePocketid IdentityProviderType = "pocketid" IdentityProviderTypeZitadel IdentityProviderType = "zitadel" - IdentityProviderTypeAdfs IdentityProviderType = "adfs" ) // Valid indicates whether the value is a known member of the IdentityProviderType enum. func (e IdentityProviderType) Valid() bool { switch e { + case IdentityProviderTypeAdfs: + return true case IdentityProviderTypeEntra: return true case IdentityProviderTypeGoogle: @@ -538,8 +564,6 @@ func (e IdentityProviderType) Valid() bool { return true case IdentityProviderTypeZitadel: return true - case IdentityProviderTypeAdfs: - return true default: return false } @@ -1455,6 +1479,13 @@ type AccountSettings struct { // AutoUpdateVersion Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") AutoUpdateVersion *string `json:"auto_update_version,omitempty"` + // ConnectionMode Account-wide default peer-connection mode. NULL means + // "fall back to lazy_connection_enabled" for backwards compatibility. + // Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are + // functional. p2p-dynamic is reserved (passes through as p2p in + // Phase 1; will become functional in Phase 2). 
+ ConnectionMode *AccountSettingsConnectionMode `json:"connection_mode,omitempty"` + // DnsDomain Allows to define a custom dns domain for the account DnsDomain *string `json:"dns_domain,omitempty"` @@ -1483,6 +1514,17 @@ type AccountSettings struct { // NetworkRange Allows to define a custom network range for the account in CIDR format NetworkRange *string `json:"network_range,omitempty"` + // P2pRetryMaxSeconds Maximum interval between P2P retry attempts after consecutive + // ICE failures, in seconds. Default 900 (= 15 min). Set to 0 to + // disable backoff (always retry immediately, Phase-2 behavior). + // Effective only in p2p-dynamic mode (added in Phase 3). + P2pRetryMaxSeconds *int64 `json:"p2p_retry_max_seconds,omitempty"` + + // P2pTimeoutSeconds Default ICE-worker idle timeout in seconds. 0 = never tear down. + // Effective only in p2p-dynamic mode (added in Phase 2). + // NULL means "use built-in default" (180 minutes). + P2pTimeoutSeconds *int64 `json:"p2p_timeout_seconds,omitempty"` + // PeerExposeEnabled Enables or disables peer expose. If enabled, peers can expose local services through the reverse proxy using the CLI. PeerExposeEnabled bool `json:"peer_expose_enabled"` @@ -1504,10 +1546,23 @@ type AccountSettings struct { // RegularUsersViewBlocked Allows blocking regular users from viewing parts of the system. RegularUsersViewBlocked bool `json:"regular_users_view_blocked"` + // RelayTimeoutSeconds Default relay-worker idle timeout in seconds. 0 = never tear + // down. Effective in p2p-lazy and p2p-dynamic modes. Backwards- + // compat alias for NB_LAZY_CONN_INACTIVITY_THRESHOLD on the + // client. NULL means "use built-in default" (5 minutes). 
+ RelayTimeoutSeconds *int64 `json:"relay_timeout_seconds,omitempty"` + // RoutingPeerDnsResolutionEnabled Enables or disables DNS resolution on the routing peers RoutingPeerDnsResolutionEnabled *bool `json:"routing_peer_dns_resolution_enabled,omitempty"` } +// AccountSettingsConnectionMode Account-wide default peer-connection mode. NULL means +// "fall back to lazy_connection_enabled" for backwards compatibility. +// Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are +// functional. p2p-dynamic is reserved (passes through as p2p in +// Phase 1; will become functional in Phase 2). +type AccountSettingsConnectionMode string + // AvailablePorts defines model for AvailablePorts. type AvailablePorts struct { // Tcp Number of available TCP ports left on the ingress peer @@ -1626,7 +1681,9 @@ type Checks struct { // OsVersionCheck Posture check for the version of operating system OsVersionCheck *OSVersionCheck `json:"os_version_check,omitempty"` - // PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. + // PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it + // contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, + // so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. PeerNetworkRangeCheck *PeerNetworkRangeCheck `json:"peer_network_range_check,omitempty"` // ProcessCheck Posture Check for binaries exist and are running in the peer’s system @@ -3312,7 +3369,9 @@ type PeerMinimum struct { Name string `json:"name"` } -// PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. 
A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. +// PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it +// contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, +// so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. type PeerNetworkRangeCheck struct { // Action Action to take upon policy match Action PeerNetworkRangeCheckAction `json:"action"` @@ -4761,6 +4820,12 @@ type ZoneRequest struct { // Conflict Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided. type Conflict = ErrorResponse +// bearerAuthContextKey is the context key for BearerAuth security scheme +type bearerAuthContextKey string + +// tokenAuthContextKey is the context key for TokenAuth security scheme +type tokenAuthContextKey string + // GetApiEventsNetworkTrafficParams defines parameters for GetApiEventsNetworkTraffic. type GetApiEventsNetworkTrafficParams struct { // Page Page number diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 604f9c79385..879d5384150 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -71,6 +71,66 @@ func (JobStatus) EnumDescriptor() ([]byte, []int) { return file_management_proto_rawDescGZIP(), []int{0} } +// ConnectionMode controls how a peer establishes connections to other peers. +// Added in Phase 1 of the connection-mode consolidation (see issue #5989). 
+// CONNECTION_MODE_UNSPECIFIED is the proto default and means "fall back to +// the legacy LazyConnectionEnabled boolean field" -- required for backwards +// compatibility with old management servers that don't set this field. +type ConnectionMode int32 + +const ( + ConnectionMode_CONNECTION_MODE_UNSPECIFIED ConnectionMode = 0 + ConnectionMode_CONNECTION_MODE_RELAY_FORCED ConnectionMode = 1 + ConnectionMode_CONNECTION_MODE_P2P ConnectionMode = 2 + ConnectionMode_CONNECTION_MODE_P2P_LAZY ConnectionMode = 3 + ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC ConnectionMode = 4 +) + +// Enum value maps for ConnectionMode. +var ( + ConnectionMode_name = map[int32]string{ + 0: "CONNECTION_MODE_UNSPECIFIED", + 1: "CONNECTION_MODE_RELAY_FORCED", + 2: "CONNECTION_MODE_P2P", + 3: "CONNECTION_MODE_P2P_LAZY", + 4: "CONNECTION_MODE_P2P_DYNAMIC", + } + ConnectionMode_value = map[string]int32{ + "CONNECTION_MODE_UNSPECIFIED": 0, + "CONNECTION_MODE_RELAY_FORCED": 1, + "CONNECTION_MODE_P2P": 2, + "CONNECTION_MODE_P2P_LAZY": 3, + "CONNECTION_MODE_P2P_DYNAMIC": 4, + } +) + +func (x ConnectionMode) Enum() *ConnectionMode { + p := new(ConnectionMode) + *p = x + return p +} + +func (x ConnectionMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnectionMode) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[1].Descriptor() +} + +func (ConnectionMode) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[1] +} + +func (x ConnectionMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnectionMode.Descriptor instead. 
+func (ConnectionMode) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{1} +} + type RuleProtocol int32 const ( @@ -113,11 +173,11 @@ func (x RuleProtocol) String() string { } func (RuleProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[1].Descriptor() + return file_management_proto_enumTypes[2].Descriptor() } func (RuleProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[1] + return &file_management_proto_enumTypes[2] } func (x RuleProtocol) Number() protoreflect.EnumNumber { @@ -126,7 +186,7 @@ func (x RuleProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use RuleProtocol.Descriptor instead. func (RuleProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{1} + return file_management_proto_rawDescGZIP(), []int{2} } type RuleDirection int32 @@ -159,11 +219,11 @@ func (x RuleDirection) String() string { } func (RuleDirection) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[2].Descriptor() + return file_management_proto_enumTypes[3].Descriptor() } func (RuleDirection) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[2] + return &file_management_proto_enumTypes[3] } func (x RuleDirection) Number() protoreflect.EnumNumber { @@ -172,7 +232,7 @@ func (x RuleDirection) Number() protoreflect.EnumNumber { // Deprecated: Use RuleDirection.Descriptor instead. 
func (RuleDirection) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{2} + return file_management_proto_rawDescGZIP(), []int{3} } type RuleAction int32 @@ -205,11 +265,11 @@ func (x RuleAction) String() string { } func (RuleAction) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[3].Descriptor() + return file_management_proto_enumTypes[4].Descriptor() } func (RuleAction) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[3] + return &file_management_proto_enumTypes[4] } func (x RuleAction) Number() protoreflect.EnumNumber { @@ -218,7 +278,7 @@ func (x RuleAction) Number() protoreflect.EnumNumber { // Deprecated: Use RuleAction.Descriptor instead. func (RuleAction) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{3} + return file_management_proto_rawDescGZIP(), []int{4} } type ExposeProtocol int32 @@ -260,11 +320,11 @@ func (x ExposeProtocol) String() string { } func (ExposeProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[4].Descriptor() + return file_management_proto_enumTypes[5].Descriptor() } func (ExposeProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[4] + return &file_management_proto_enumTypes[5] } func (x ExposeProtocol) Number() protoreflect.EnumNumber { @@ -273,7 +333,7 @@ func (x ExposeProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use ExposeProtocol.Descriptor instead. 
func (ExposeProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{4} + return file_management_proto_rawDescGZIP(), []int{5} } type HostConfig_Protocol int32 @@ -315,11 +375,11 @@ func (x HostConfig_Protocol) String() string { } func (HostConfig_Protocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[5].Descriptor() + return file_management_proto_enumTypes[6].Descriptor() } func (HostConfig_Protocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[5] + return &file_management_proto_enumTypes[6] } func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { @@ -358,11 +418,11 @@ func (x DeviceAuthorizationFlowProvider) String() string { } func (DeviceAuthorizationFlowProvider) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[6].Descriptor() + return file_management_proto_enumTypes[7].Descriptor() } func (DeviceAuthorizationFlowProvider) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[6] + return &file_management_proto_enumTypes[7] } func (x DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { @@ -2163,6 +2223,27 @@ type PeerConfig struct { Mtu int32 `protobuf:"varint,7,opt,name=mtu,proto3" json:"mtu,omitempty"` // Auto-update config AutoUpdate *AutoUpdateSettings `protobuf:"bytes,8,opt,name=autoUpdate,proto3" json:"autoUpdate,omitempty"` + // Connection-mode resolved by the management server. UNSPECIFIED = use + // legacy LazyConnectionEnabled fallback. Added in Phase 1 (#5989). + ConnectionMode ConnectionMode `protobuf:"varint,11,opt,name=ConnectionMode,proto3,enum=management.ConnectionMode" json:"ConnectionMode,omitempty"` + // Idle timeout for the ICE worker in seconds. 0 = never tear down. + // Effective in p2p-dynamic mode (added in Phase 2). Sent unconditionally + // for forward-compat. Added in Phase 1 (#5989). 
+ P2PTimeoutSeconds uint32 `protobuf:"varint,12,opt,name=P2pTimeoutSeconds,proto3" json:"P2pTimeoutSeconds,omitempty"` + // Idle timeout for the relay worker in seconds. 0 = never tear down. + // Effective in p2p-lazy and p2p-dynamic modes. Backwards-compat alias for + // NB_LAZY_CONN_INACTIVITY_THRESHOLD. Added in Phase 1 (#5989). + RelayTimeoutSeconds uint32 `protobuf:"varint,13,opt,name=RelayTimeoutSeconds,proto3" json:"RelayTimeoutSeconds,omitempty"` + // P2pRetryMaxSeconds is the maximum interval between P2P retry attempts + // after consecutive ICE failures, in seconds. Effective only in + // p2p-dynamic mode (added in Phase 3 of #5989). + // + // Wire-format semantics: + // + // 0 -> "not set"; daemon uses built-in default (15 min) + // max -> "user explicitly disabled backoff"; daemon disables backoff + // else -> seconds, capped at this value in the cenkalti/backoff schedule + P2PRetryMaxSeconds uint32 `protobuf:"varint,14,opt,name=P2pRetryMaxSeconds,proto3" json:"P2pRetryMaxSeconds,omitempty"` } func (x *PeerConfig) Reset() { @@ -2253,6 +2334,34 @@ func (x *PeerConfig) GetAutoUpdate() *AutoUpdateSettings { return nil } +func (x *PeerConfig) GetConnectionMode() ConnectionMode { + if x != nil { + return x.ConnectionMode + } + return ConnectionMode_CONNECTION_MODE_UNSPECIFIED +} + +func (x *PeerConfig) GetP2PTimeoutSeconds() uint32 { + if x != nil { + return x.P2PTimeoutSeconds + } + return 0 +} + +func (x *PeerConfig) GetRelayTimeoutSeconds() uint32 { + if x != nil { + return x.RelayTimeoutSeconds + } + return 0 +} + +func (x *PeerConfig) GetP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.P2PRetryMaxSeconds + } + return 0 +} + type AutoUpdateSettings struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4715,7 +4824,7 @@ var file_management_proto_rawDesc = []byte{ 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 
0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x02, 0x0a, 0x0a, 0x50, 0x65, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xb3, 0x04, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -4736,7 +4845,21 @@ var file_management_proto_rawDesc = []byte{ 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, + 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 0x42, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, + 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x12, 0x30, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 
0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, + 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, + 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x12, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x22, 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, @@ -5057,80 +5180,91 @@ var file_management_proto_rawDesc = []byte{ 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, - 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, - 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, - 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, - 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, - 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, - 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, - 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, - 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, - 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, - 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, - 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, - 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, - 0x44, 0x50, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, - 0x4c, 0x53, 0x10, 0x04, 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, - 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, - 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0xab, 0x01, 0x0a, 0x0e, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1f, + 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 
0x44, 0x10, 0x00, 0x12, + 0x20, 0x0a, 0x1c, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, + 0x44, 0x45, 0x5f, 0x52, 0x45, 0x4c, 0x41, 0x59, 0x5f, 0x46, 0x4f, 0x52, 0x43, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, + 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x32, + 0x50, 0x5f, 0x4c, 0x41, 0x5a, 0x59, 0x10, 0x03, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x5f, + 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x04, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, + 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, + 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, + 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, + 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, + 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, 0x63, 0x0a, + 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x53, + 0x10, 0x01, 
0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x43, 0x50, + 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x44, 0x50, + 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x4c, 0x53, + 0x10, 0x04, 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, + 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, + 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, - 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, - 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, + 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, + 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, - 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 
+ 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, + 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, - 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, - 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 
0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, + 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, - 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, - 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 
0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x65, 0x6e, + 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, - 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, + 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5145,166 +5279,168 @@ func file_management_proto_rawDescGZIP() []byte { return file_management_proto_rawDescData } -var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 7) +var file_management_proto_enumTypes = 
make([]protoimpl.EnumInfo, 8) var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 55) var file_management_proto_goTypes = []interface{}{ (JobStatus)(0), // 0: management.JobStatus - (RuleProtocol)(0), // 1: management.RuleProtocol - (RuleDirection)(0), // 2: management.RuleDirection - (RuleAction)(0), // 3: management.RuleAction - (ExposeProtocol)(0), // 4: management.ExposeProtocol - (HostConfig_Protocol)(0), // 5: management.HostConfig.Protocol - (DeviceAuthorizationFlowProvider)(0), // 6: management.DeviceAuthorizationFlow.provider - (*EncryptedMessage)(nil), // 7: management.EncryptedMessage - (*JobRequest)(nil), // 8: management.JobRequest - (*JobResponse)(nil), // 9: management.JobResponse - (*BundleParameters)(nil), // 10: management.BundleParameters - (*BundleResult)(nil), // 11: management.BundleResult - (*SyncRequest)(nil), // 12: management.SyncRequest - (*SyncResponse)(nil), // 13: management.SyncResponse - (*SyncMetaRequest)(nil), // 14: management.SyncMetaRequest - (*LoginRequest)(nil), // 15: management.LoginRequest - (*PeerKeys)(nil), // 16: management.PeerKeys - (*Environment)(nil), // 17: management.Environment - (*File)(nil), // 18: management.File - (*Flags)(nil), // 19: management.Flags - (*PeerSystemMeta)(nil), // 20: management.PeerSystemMeta - (*LoginResponse)(nil), // 21: management.LoginResponse - (*ServerKeyResponse)(nil), // 22: management.ServerKeyResponse - (*Empty)(nil), // 23: management.Empty - (*NetbirdConfig)(nil), // 24: management.NetbirdConfig - (*HostConfig)(nil), // 25: management.HostConfig - (*RelayConfig)(nil), // 26: management.RelayConfig - (*FlowConfig)(nil), // 27: management.FlowConfig - (*JWTConfig)(nil), // 28: management.JWTConfig - (*ProtectedHostConfig)(nil), // 29: management.ProtectedHostConfig - (*PeerConfig)(nil), // 30: management.PeerConfig - (*AutoUpdateSettings)(nil), // 31: management.AutoUpdateSettings - (*NetworkMap)(nil), // 32: management.NetworkMap - (*SSHAuth)(nil), // 33: 
management.SSHAuth - (*MachineUserIndexes)(nil), // 34: management.MachineUserIndexes - (*RemotePeerConfig)(nil), // 35: management.RemotePeerConfig - (*SSHConfig)(nil), // 36: management.SSHConfig - (*DeviceAuthorizationFlowRequest)(nil), // 37: management.DeviceAuthorizationFlowRequest - (*DeviceAuthorizationFlow)(nil), // 38: management.DeviceAuthorizationFlow - (*PKCEAuthorizationFlowRequest)(nil), // 39: management.PKCEAuthorizationFlowRequest - (*PKCEAuthorizationFlow)(nil), // 40: management.PKCEAuthorizationFlow - (*ProviderConfig)(nil), // 41: management.ProviderConfig - (*Route)(nil), // 42: management.Route - (*DNSConfig)(nil), // 43: management.DNSConfig - (*CustomZone)(nil), // 44: management.CustomZone - (*SimpleRecord)(nil), // 45: management.SimpleRecord - (*NameServerGroup)(nil), // 46: management.NameServerGroup - (*NameServer)(nil), // 47: management.NameServer - (*FirewallRule)(nil), // 48: management.FirewallRule - (*NetworkAddress)(nil), // 49: management.NetworkAddress - (*Checks)(nil), // 50: management.Checks - (*PortInfo)(nil), // 51: management.PortInfo - (*RouteFirewallRule)(nil), // 52: management.RouteFirewallRule - (*ForwardingRule)(nil), // 53: management.ForwardingRule - (*ExposeServiceRequest)(nil), // 54: management.ExposeServiceRequest - (*ExposeServiceResponse)(nil), // 55: management.ExposeServiceResponse - (*RenewExposeRequest)(nil), // 56: management.RenewExposeRequest - (*RenewExposeResponse)(nil), // 57: management.RenewExposeResponse - (*StopExposeRequest)(nil), // 58: management.StopExposeRequest - (*StopExposeResponse)(nil), // 59: management.StopExposeResponse - nil, // 60: management.SSHAuth.MachineUsersEntry - (*PortInfo_Range)(nil), // 61: management.PortInfo.Range - (*timestamppb.Timestamp)(nil), // 62: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 63: google.protobuf.Duration + (ConnectionMode)(0), // 1: management.ConnectionMode + (RuleProtocol)(0), // 2: management.RuleProtocol + (RuleDirection)(0), 
// 3: management.RuleDirection + (RuleAction)(0), // 4: management.RuleAction + (ExposeProtocol)(0), // 5: management.ExposeProtocol + (HostConfig_Protocol)(0), // 6: management.HostConfig.Protocol + (DeviceAuthorizationFlowProvider)(0), // 7: management.DeviceAuthorizationFlow.provider + (*EncryptedMessage)(nil), // 8: management.EncryptedMessage + (*JobRequest)(nil), // 9: management.JobRequest + (*JobResponse)(nil), // 10: management.JobResponse + (*BundleParameters)(nil), // 11: management.BundleParameters + (*BundleResult)(nil), // 12: management.BundleResult + (*SyncRequest)(nil), // 13: management.SyncRequest + (*SyncResponse)(nil), // 14: management.SyncResponse + (*SyncMetaRequest)(nil), // 15: management.SyncMetaRequest + (*LoginRequest)(nil), // 16: management.LoginRequest + (*PeerKeys)(nil), // 17: management.PeerKeys + (*Environment)(nil), // 18: management.Environment + (*File)(nil), // 19: management.File + (*Flags)(nil), // 20: management.Flags + (*PeerSystemMeta)(nil), // 21: management.PeerSystemMeta + (*LoginResponse)(nil), // 22: management.LoginResponse + (*ServerKeyResponse)(nil), // 23: management.ServerKeyResponse + (*Empty)(nil), // 24: management.Empty + (*NetbirdConfig)(nil), // 25: management.NetbirdConfig + (*HostConfig)(nil), // 26: management.HostConfig + (*RelayConfig)(nil), // 27: management.RelayConfig + (*FlowConfig)(nil), // 28: management.FlowConfig + (*JWTConfig)(nil), // 29: management.JWTConfig + (*ProtectedHostConfig)(nil), // 30: management.ProtectedHostConfig + (*PeerConfig)(nil), // 31: management.PeerConfig + (*AutoUpdateSettings)(nil), // 32: management.AutoUpdateSettings + (*NetworkMap)(nil), // 33: management.NetworkMap + (*SSHAuth)(nil), // 34: management.SSHAuth + (*MachineUserIndexes)(nil), // 35: management.MachineUserIndexes + (*RemotePeerConfig)(nil), // 36: management.RemotePeerConfig + (*SSHConfig)(nil), // 37: management.SSHConfig + (*DeviceAuthorizationFlowRequest)(nil), // 38: 
management.DeviceAuthorizationFlowRequest + (*DeviceAuthorizationFlow)(nil), // 39: management.DeviceAuthorizationFlow + (*PKCEAuthorizationFlowRequest)(nil), // 40: management.PKCEAuthorizationFlowRequest + (*PKCEAuthorizationFlow)(nil), // 41: management.PKCEAuthorizationFlow + (*ProviderConfig)(nil), // 42: management.ProviderConfig + (*Route)(nil), // 43: management.Route + (*DNSConfig)(nil), // 44: management.DNSConfig + (*CustomZone)(nil), // 45: management.CustomZone + (*SimpleRecord)(nil), // 46: management.SimpleRecord + (*NameServerGroup)(nil), // 47: management.NameServerGroup + (*NameServer)(nil), // 48: management.NameServer + (*FirewallRule)(nil), // 49: management.FirewallRule + (*NetworkAddress)(nil), // 50: management.NetworkAddress + (*Checks)(nil), // 51: management.Checks + (*PortInfo)(nil), // 52: management.PortInfo + (*RouteFirewallRule)(nil), // 53: management.RouteFirewallRule + (*ForwardingRule)(nil), // 54: management.ForwardingRule + (*ExposeServiceRequest)(nil), // 55: management.ExposeServiceRequest + (*ExposeServiceResponse)(nil), // 56: management.ExposeServiceResponse + (*RenewExposeRequest)(nil), // 57: management.RenewExposeRequest + (*RenewExposeResponse)(nil), // 58: management.RenewExposeResponse + (*StopExposeRequest)(nil), // 59: management.StopExposeRequest + (*StopExposeResponse)(nil), // 60: management.StopExposeResponse + nil, // 61: management.SSHAuth.MachineUsersEntry + (*PortInfo_Range)(nil), // 62: management.PortInfo.Range + (*timestamppb.Timestamp)(nil), // 63: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 64: google.protobuf.Duration } var file_management_proto_depIdxs = []int32{ - 10, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters + 11, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters 0, // 1: management.JobResponse.status:type_name -> management.JobStatus - 11, // 2: management.JobResponse.bundle:type_name -> management.BundleResult - 20, // 3: 
management.SyncRequest.meta:type_name -> management.PeerSystemMeta - 24, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig - 30, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig - 35, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig - 32, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap - 50, // 8: management.SyncResponse.Checks:type_name -> management.Checks - 20, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta - 20, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta - 16, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys - 49, // 12: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress - 17, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment - 18, // 14: management.PeerSystemMeta.files:type_name -> management.File - 19, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags - 24, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig - 30, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig - 50, // 18: management.LoginResponse.Checks:type_name -> management.Checks - 62, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp - 25, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig - 29, // 21: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig - 25, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig - 26, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig - 27, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig - 5, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol - 63, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration - 25, // 27: 
management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig - 36, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig - 31, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings - 30, // 30: management.NetworkMap.peerConfig:type_name -> management.PeerConfig - 35, // 31: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig - 42, // 32: management.NetworkMap.Routes:type_name -> management.Route - 43, // 33: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig - 35, // 34: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig - 48, // 35: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule - 52, // 36: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule - 53, // 37: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule - 33, // 38: management.NetworkMap.sshAuth:type_name -> management.SSHAuth - 60, // 39: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry - 36, // 40: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig - 28, // 41: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig - 6, // 42: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider - 41, // 43: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 41, // 44: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 46, // 45: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup - 44, // 46: management.DNSConfig.CustomZones:type_name -> management.CustomZone - 45, // 47: management.CustomZone.Records:type_name -> management.SimpleRecord - 47, // 48: management.NameServerGroup.NameServers:type_name -> management.NameServer - 2, // 49: management.FirewallRule.Direction:type_name -> management.RuleDirection 
- 3, // 50: management.FirewallRule.Action:type_name -> management.RuleAction - 1, // 51: management.FirewallRule.Protocol:type_name -> management.RuleProtocol - 51, // 52: management.FirewallRule.PortInfo:type_name -> management.PortInfo - 61, // 53: management.PortInfo.range:type_name -> management.PortInfo.Range - 3, // 54: management.RouteFirewallRule.action:type_name -> management.RuleAction - 1, // 55: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol - 51, // 56: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo - 1, // 57: management.ForwardingRule.protocol:type_name -> management.RuleProtocol - 51, // 58: management.ForwardingRule.destinationPort:type_name -> management.PortInfo - 51, // 59: management.ForwardingRule.translatedPort:type_name -> management.PortInfo - 4, // 60: management.ExposeServiceRequest.protocol:type_name -> management.ExposeProtocol - 34, // 61: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes - 7, // 62: management.ManagementService.Login:input_type -> management.EncryptedMessage - 7, // 63: management.ManagementService.Sync:input_type -> management.EncryptedMessage - 23, // 64: management.ManagementService.GetServerKey:input_type -> management.Empty - 23, // 65: management.ManagementService.isHealthy:input_type -> management.Empty - 7, // 66: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage - 7, // 67: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage - 7, // 68: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage - 7, // 69: management.ManagementService.Logout:input_type -> management.EncryptedMessage - 7, // 70: management.ManagementService.Job:input_type -> management.EncryptedMessage - 7, // 71: management.ManagementService.CreateExpose:input_type -> management.EncryptedMessage - 7, // 72: 
management.ManagementService.RenewExpose:input_type -> management.EncryptedMessage - 7, // 73: management.ManagementService.StopExpose:input_type -> management.EncryptedMessage - 7, // 74: management.ManagementService.Login:output_type -> management.EncryptedMessage - 7, // 75: management.ManagementService.Sync:output_type -> management.EncryptedMessage - 22, // 76: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse - 23, // 77: management.ManagementService.isHealthy:output_type -> management.Empty - 7, // 78: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage - 7, // 79: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage - 23, // 80: management.ManagementService.SyncMeta:output_type -> management.Empty - 23, // 81: management.ManagementService.Logout:output_type -> management.Empty - 7, // 82: management.ManagementService.Job:output_type -> management.EncryptedMessage - 7, // 83: management.ManagementService.CreateExpose:output_type -> management.EncryptedMessage - 7, // 84: management.ManagementService.RenewExpose:output_type -> management.EncryptedMessage - 7, // 85: management.ManagementService.StopExpose:output_type -> management.EncryptedMessage - 74, // [74:86] is the sub-list for method output_type - 62, // [62:74] is the sub-list for method input_type - 62, // [62:62] is the sub-list for extension type_name - 62, // [62:62] is the sub-list for extension extendee - 0, // [0:62] is the sub-list for field type_name + 12, // 2: management.JobResponse.bundle:type_name -> management.BundleResult + 21, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta + 25, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig + 31, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig + 36, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig + 33, // 
7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap + 51, // 8: management.SyncResponse.Checks:type_name -> management.Checks + 21, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta + 21, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta + 17, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys + 50, // 12: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress + 18, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment + 19, // 14: management.PeerSystemMeta.files:type_name -> management.File + 20, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags + 25, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig + 31, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig + 51, // 18: management.LoginResponse.Checks:type_name -> management.Checks + 63, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp + 26, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig + 30, // 21: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig + 26, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig + 27, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig + 28, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig + 6, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol + 64, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration + 26, // 27: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig + 37, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig + 32, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings + 1, // 30: management.PeerConfig.ConnectionMode:type_name -> management.ConnectionMode + 31, // 31: 
management.NetworkMap.peerConfig:type_name -> management.PeerConfig + 36, // 32: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig + 43, // 33: management.NetworkMap.Routes:type_name -> management.Route + 44, // 34: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig + 36, // 35: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig + 49, // 36: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule + 53, // 37: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule + 54, // 38: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule + 34, // 39: management.NetworkMap.sshAuth:type_name -> management.SSHAuth + 61, // 40: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry + 37, // 41: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig + 29, // 42: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig + 7, // 43: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider + 42, // 44: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 42, // 45: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 47, // 46: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup + 45, // 47: management.DNSConfig.CustomZones:type_name -> management.CustomZone + 46, // 48: management.CustomZone.Records:type_name -> management.SimpleRecord + 48, // 49: management.NameServerGroup.NameServers:type_name -> management.NameServer + 3, // 50: management.FirewallRule.Direction:type_name -> management.RuleDirection + 4, // 51: management.FirewallRule.Action:type_name -> management.RuleAction + 2, // 52: management.FirewallRule.Protocol:type_name -> management.RuleProtocol + 52, // 53: management.FirewallRule.PortInfo:type_name -> management.PortInfo + 62, // 54: 
management.PortInfo.range:type_name -> management.PortInfo.Range + 4, // 55: management.RouteFirewallRule.action:type_name -> management.RuleAction + 2, // 56: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol + 52, // 57: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo + 2, // 58: management.ForwardingRule.protocol:type_name -> management.RuleProtocol + 52, // 59: management.ForwardingRule.destinationPort:type_name -> management.PortInfo + 52, // 60: management.ForwardingRule.translatedPort:type_name -> management.PortInfo + 5, // 61: management.ExposeServiceRequest.protocol:type_name -> management.ExposeProtocol + 35, // 62: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes + 8, // 63: management.ManagementService.Login:input_type -> management.EncryptedMessage + 8, // 64: management.ManagementService.Sync:input_type -> management.EncryptedMessage + 24, // 65: management.ManagementService.GetServerKey:input_type -> management.Empty + 24, // 66: management.ManagementService.isHealthy:input_type -> management.Empty + 8, // 67: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage + 8, // 68: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage + 8, // 69: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage + 8, // 70: management.ManagementService.Logout:input_type -> management.EncryptedMessage + 8, // 71: management.ManagementService.Job:input_type -> management.EncryptedMessage + 8, // 72: management.ManagementService.CreateExpose:input_type -> management.EncryptedMessage + 8, // 73: management.ManagementService.RenewExpose:input_type -> management.EncryptedMessage + 8, // 74: management.ManagementService.StopExpose:input_type -> management.EncryptedMessage + 8, // 75: management.ManagementService.Login:output_type -> management.EncryptedMessage + 8, // 76: 
management.ManagementService.Sync:output_type -> management.EncryptedMessage + 23, // 77: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse + 24, // 78: management.ManagementService.isHealthy:output_type -> management.Empty + 8, // 79: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage + 8, // 80: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage + 24, // 81: management.ManagementService.SyncMeta:output_type -> management.Empty + 24, // 82: management.ManagementService.Logout:output_type -> management.Empty + 8, // 83: management.ManagementService.Job:output_type -> management.EncryptedMessage + 8, // 84: management.ManagementService.CreateExpose:output_type -> management.EncryptedMessage + 8, // 85: management.ManagementService.RenewExpose:output_type -> management.EncryptedMessage + 8, // 86: management.ManagementService.StopExpose:output_type -> management.EncryptedMessage + 75, // [75:87] is the sub-list for method output_type + 63, // [63:75] is the sub-list for method input_type + 63, // [63:63] is the sub-list for extension type_name + 63, // [63:63] is the sub-list for extension extendee + 0, // [0:63] is the sub-list for field type_name } func init() { file_management_proto_init() } @@ -5977,7 +6113,7 @@ func file_management_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_management_proto_rawDesc, - NumEnums: 7, + NumEnums: 8, NumMessages: 55, NumExtensions: 0, NumServices: 1, diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index 70a53067974..12509cbc9f8 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -335,6 +335,48 @@ message PeerConfig { // Auto-update config AutoUpdateSettings autoUpdate = 8; + + // Tags 9 and 10 are intentionally left unused so that future 
small + // additions can land without re-numbering the new connection-mode + // fields. Reserved here to make the gap explicit for any reviewer. + reserved 9, 10; + + // Connection-mode resolved by the management server. UNSPECIFIED = use + // legacy LazyConnectionEnabled fallback. Added in Phase 1 (#5989). + ConnectionMode ConnectionMode = 11; + + // Idle timeout for the ICE worker in seconds. 0 = never tear down. + // Effective in p2p-dynamic mode (added in Phase 2). Sent unconditionally + // for forward-compat. Added in Phase 1 (#5989). + uint32 P2pTimeoutSeconds = 12; + + // Idle timeout for the relay worker in seconds. 0 = never tear down. + // Effective in p2p-lazy and p2p-dynamic modes. Backwards-compat alias for + // NB_LAZY_CONN_INACTIVITY_THRESHOLD. Added in Phase 1 (#5989). + uint32 RelayTimeoutSeconds = 13; + + // P2pRetryMaxSeconds is the maximum interval between P2P retry attempts + // after consecutive ICE failures, in seconds. Effective only in + // p2p-dynamic mode (added in Phase 3 of #5989). + // + // Wire-format semantics: + // 0 -> "not set"; daemon uses built-in default (15 min) + // max -> "user explicitly disabled backoff"; daemon disables backoff + // else -> seconds, capped at this value in the cenkalti/backoff schedule + uint32 P2pRetryMaxSeconds = 14; +} + +// ConnectionMode controls how a peer establishes connections to other peers. +// Added in Phase 1 of the connection-mode consolidation (see issue #5989). +// CONNECTION_MODE_UNSPECIFIED is the proto default and means "fall back to +// the legacy LazyConnectionEnabled boolean field" -- required for backwards +// compatibility with old management servers that don't set this field. +enum ConnectionMode { + CONNECTION_MODE_UNSPECIFIED = 0; + CONNECTION_MODE_RELAY_FORCED = 1; + CONNECTION_MODE_P2P = 2; + CONNECTION_MODE_P2P_LAZY = 3; + CONNECTION_MODE_P2P_DYNAMIC = 4; } message AutoUpdateSettings {