diff --git a/.gitignore b/.gitignore index a0f128933ef..ca5e81e996a 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ infrastructure_files/**/docker-compose.yml.bkp.** infrastructure_files/**/openid-configuration.json.bkp.** infrastructure_files/**/turnserver.conf.bkp.** management/management +management/netbird-mgmt client/client client/client.exe *.syso diff --git a/client/android/client.go b/client/android/client.go index 37e17a36319..0caa60dd598 100644 --- a/client/android/client.go +++ b/client/android/client.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "slices" + "strings" "sync" "time" @@ -250,6 +251,13 @@ func (c *Client) DebugBundle(platformFiles PlatformFiles, anonymize bool) (strin if cm := e.GetClientMetrics(); cm != nil { deps.ClientMetrics = cm } + // Phase 3.7h (#5989): record server-pushed mode + timers in config.txt. + if cm := e.ConnMgr(); cm != nil { + deps.ServerPushedConnectionMode = cm.ServerPushedMode().String() + deps.ServerPushedRelayTimeoutSec = cm.ServerPushedRelayTimeoutSecs() + deps.ServerPushedP2pTimeoutSec = cm.ServerPushedP2pTimeoutSecs() + deps.ServerPushedP2pRetryMaxSec = cm.ServerPushedP2pRetryMaxSecs() + } } } @@ -295,17 +303,56 @@ func (c *Client) SetInfoLogLevel() { // PeersList return with the list of the PeerInfos func (c *Client) PeersList() *PeerInfoArray { + // Refresh WireGuard counters (BytesRx/Tx + LastWireguardHandshake) + // from the kernel/uapi interface before snapshotting. Without this + // the Android UI sees the stale values that were last written when + // the peer was opened/closed (typically 0), because the desktop + // CLI's Status RPC is what normally drives RefreshWireGuardStats. + // Phase 3.7i. 
+ if err := c.recorder.RefreshWireGuardStats(); err != nil { + log.Debugf("PeersList: refresh wg stats: %v", err) + } fullStatus := c.recorder.GetFullStatus() peerInfos := make([]PeerInfo, len(fullStatus.Peers)) for n, p := range fullStatus.Peers { pi := PeerInfo{ - p.IP, - p.FQDN, - int(p.ConnStatus), - PeerRoutes{routes: maps.Keys(p.GetRoutes())}, + IP: p.IP, + FQDN: p.FQDN, + ConnStatus: int(p.ConnStatus), + Routes: PeerRoutes{routes: maps.Keys(p.GetRoutes())}, + } + + // Phase 3.7i (#5989): enrichment fields. + pi.Relayed = p.Relayed + pi.ServerOnline = p.ServerOnline + pi.LocalIceCandidateEndpoint = p.LocalIceCandidateEndpoint + pi.RemoteIceCandidateEndpoint = p.RemoteIceCandidateEndpoint + pi.RelayServerAddress = p.RelayServerAddress + if !p.LastWireguardHandshake.IsZero() { + pi.LastWireguardHandshake = p.LastWireguardHandshake.Format(time.RFC3339) + } + if !p.RemoteLastSeenAtServer.IsZero() { + pi.LastSeenAtServer = p.RemoteLastSeenAtServer.Format(time.RFC3339) + } + pi.LatencyMs = p.Latency.Milliseconds() + pi.BytesRx = p.BytesRx + pi.BytesTx = p.BytesTx + pi.EffectiveConnectionMode = p.RemoteEffectiveConnectionMode + pi.ConfiguredConnectionMode = p.RemoteConfiguredConnectionMode + if len(p.RemoteGroups) > 0 { + pi.Groups = strings.Join(p.RemoteGroups, ",") } + pi.ConnectionTypeExtended = peer.DeriveConnectionTypeExtended(p) + pi.IceBackoffFailures = int32(p.IceBackoffFailures) + if !p.IceBackoffNextRetry.IsZero() { + pi.IceBackoffNextRetry = p.IceBackoffNextRetry.Format(time.RFC3339) + } + pi.IceBackoffSuspended = p.IceBackoffSuspended + // AgentVersion / OsVersion: peer.State does not expose these fields; + // left empty until daemon surfaces them (future phase). 
+ peerInfos[n] = pi } return &PeerInfoArray{items: peerInfos} @@ -394,6 +441,102 @@ func (c *Client) RemoveConnectionListener() { c.recorder.RemoveConnectionListener() } +// GetServerPushedConnectionMode returns the canonical name of the +// connection mode the management server most recently pushed via +// PeerConfig (independent of any local profile/env override). Returns +// an empty string when the engine has not connected yet or the server +// has not pushed a value -- the Android UI then knows to display +// just "Follow server" without the (currently: ...) suffix. +func (c *Client) GetServerPushedConnectionMode() string { + cm := c.connMgrSafe() + if cm == nil { + return "" + } + return cm.ServerPushedMode().String() +} + +// GetServerPushedRelayTimeoutSecs returns the relay timeout in seconds +// most recently pushed by the management server, or 0 when no value +// has been received. Used by the Android UI as a hint. +func (c *Client) GetServerPushedRelayTimeoutSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedRelayTimeoutSecs()) +} + +// GetServerPushedP2pTimeoutSecs returns the ICE-only timeout (seconds) +// most recently pushed by the management server. +func (c *Client) GetServerPushedP2pTimeoutSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedP2pTimeoutSecs()) +} + +// GetServerPushedP2pRetryMaxSecs returns the ICE-backoff cap (seconds) +// most recently pushed by the management server. +func (c *Client) GetServerPushedP2pRetryMaxSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedP2pRetryMaxSecs()) +} + +// GetConfiguredPeersTotal returns the total number of configured peers +// (server-online + server-offline). Phase 3.7i (#5989). 
+func (c *Client) GetConfiguredPeersTotal() int64 { + return int64(c.recorder.GetFullStatus().ConfiguredPeersTotal) +} + +// GetServerOnlinePeers returns the number of peers that are reachable via +// the server (P2P + Relayed + Idle). Phase 3.7i (#5989). +func (c *Client) GetServerOnlinePeers() int64 { + return int64(c.recorder.GetFullStatus().ServerOnlinePeers) +} + +// GetP2PConnectedPeers returns the number of peers connected via direct +// P2P (ICE). Phase 3.7i (#5989). +func (c *Client) GetP2PConnectedPeers() int64 { + return int64(c.recorder.GetFullStatus().P2PConnectedPeers) +} + +// GetRelayedConnectedPeers returns the number of peers connected via relay. +// Phase 3.7i (#5989). +func (c *Client) GetRelayedConnectedPeers() int64 { + return int64(c.recorder.GetFullStatus().RelayedConnectedPeers) +} + +// GetIdleOnlinePeers returns the number of peers that are online on the +// server but have no active connection yet. Phase 3.7i (#5989). +func (c *Client) GetIdleOnlinePeers() int64 { + return int64(c.recorder.GetFullStatus().IdleOnlinePeers) +} + +// GetServerOfflinePeers returns the number of peers that are not reachable +// via the server. Phase 3.7i (#5989). +func (c *Client) GetServerOfflinePeers() int64 { + return int64(c.recorder.GetFullStatus().ServerOfflinePeers) +} + +// connMgrSafe is a small helper that walks the Client -> ConnectClient +// -> Engine -> ConnMgr chain and returns nil at the first nil pointer. +// Each accessor that surfaces engine state to the Android UI uses it. 
+func (c *Client) connMgrSafe() *internal.ConnMgr { + cc := c.getConnectClient() + if cc == nil { + return nil + } + engine := cc.Engine() + if engine == nil { + return nil + } + return engine.ConnMgr() +} + func (c *Client) toggleRoute(command routeCommand) error { return command.toggleRoute() } diff --git a/client/android/peer_notifier.go b/client/android/peer_notifier.go index 4ec22f3ab45..0f3affbe321 100644 --- a/client/android/peer_notifier.go +++ b/client/android/peer_notifier.go @@ -17,6 +17,37 @@ type PeerInfo struct { FQDN string ConnStatus int Routes PeerRoutes + + // Phase 3.7i (#5989): per-peer enrichment fields. Strings for + // gomobile-friendliness (no time.Time / no []string). + Relayed bool + ServerOnline bool + LocalIceCandidateEndpoint string + RemoteIceCandidateEndpoint string + RelayServerAddress string + LastWireguardHandshake string // RFC3339; "" if zero + LastSeenAtServer string // RFC3339; "" if zero + LatencyMs int64 + BytesRx int64 + BytesTx int64 + EffectiveConnectionMode string + ConfiguredConnectionMode string + Groups string // comma-separated + AgentVersion string + OsVersion string + // Phase 3.7i hybrid display: daemon-derived UI label. + // Values: "", "P2P", "Relayed", "Relayed (negotiating P2P)". + // UIs should prefer this over (Relayed bool) when non-empty so the + // transient post-wakeup negotiation window renders identically + // across Android / Windows / Dashboard. + ConnectionTypeExtended string + + // Phase 3.7i lifecycle hardening: ICE-backoff snapshot. Lets the UI + // explain why a peer is staying on Relayed when failures pile up + // (gomobile-friendly: no time.Time exported — RFC3339 string). 
+ IceBackoffFailures int32 + IceBackoffNextRetry string // RFC3339; "" if zero + IceBackoffSuspended bool } func (p *PeerInfo) GetPeerRoutes() *PeerRoutes { diff --git a/client/android/preferences.go b/client/android/preferences.go index c3c8eb3fbc9..e26ce06928e 100644 --- a/client/android/preferences.go +++ b/client/android/preferences.go @@ -307,6 +307,107 @@ func (p *Preferences) SetBlockInbound(block bool) { p.configInput.BlockInbound = &block } +// GetConnectionMode returns the locally configured connection-mode override +// (canonical lower-kebab-case: "relay-forced", "p2p", "p2p-lazy", +// "p2p-dynamic", "follow-server"), or empty string if no local override +// is configured -- the daemon will then follow the server-pushed value. +func (p *Preferences) GetConnectionMode() (string, error) { + if p.configInput.ConnectionMode != nil { + return *p.configInput.ConnectionMode, nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return "", err + } + return cfg.ConnectionMode, nil +} + +// SetConnectionMode stores a local override for the connection mode. +// Pass an empty string to clear the override (revert to following the +// server-pushed value). +func (p *Preferences) SetConnectionMode(mode string) { + m := mode + p.configInput.ConnectionMode = &m +} + +// GetRelayTimeoutSeconds returns the locally configured relay-worker +// inactivity timeout in seconds, or 0 if no override is set (follow +// server-pushed value, or built-in default if the server has none). +func (p *Preferences) GetRelayTimeoutSeconds() (int64, error) { + if p.configInput.RelayTimeoutSeconds != nil { + return int64(*p.configInput.RelayTimeoutSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.RelayTimeoutSeconds), nil +} + +// SetRelayTimeoutSeconds stores a local override for the relay timeout. +// Pass 0 to clear the override. 
Negative values are clamped to 0; +// values larger than MaxUint32 are clamped to MaxUint32. The Android +// AdvancedFragment UI already clamps negatives but a Java caller using +// the bare gomobile API directly would otherwise wrap silently. +func (p *Preferences) SetRelayTimeoutSeconds(secs int64) { + v := clampUint32Seconds(secs) + p.configInput.RelayTimeoutSeconds = &v +} + +// GetP2pTimeoutSeconds returns the locally configured ICE-worker +// inactivity timeout in seconds (only effective in p2p-dynamic mode), +// or 0 if no override is set. +func (p *Preferences) GetP2pTimeoutSeconds() (int64, error) { + if p.configInput.P2pTimeoutSeconds != nil { + return int64(*p.configInput.P2pTimeoutSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.P2pTimeoutSeconds), nil +} + +// SetP2pTimeoutSeconds stores a local override for the p2p timeout. +// Pass 0 to clear the override. See SetRelayTimeoutSeconds for clamping. +func (p *Preferences) SetP2pTimeoutSeconds(secs int64) { + v := clampUint32Seconds(secs) + p.configInput.P2pTimeoutSeconds = &v +} + +// GetP2pRetryMaxSeconds returns the locally configured cap on the +// per-peer ICE-failure backoff schedule, or 0 if no override is set. +func (p *Preferences) GetP2pRetryMaxSeconds() (int64, error) { + if p.configInput.P2pRetryMaxSeconds != nil { + return int64(*p.configInput.P2pRetryMaxSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.P2pRetryMaxSeconds), nil +} + +// SetP2pRetryMaxSeconds stores a local override for the backoff cap. +// Pass 0 to clear the override. See SetRelayTimeoutSeconds for clamping. 
+func (p *Preferences) SetP2pRetryMaxSeconds(secs int64) { + v := clampUint32Seconds(secs) + p.configInput.P2pRetryMaxSeconds = &v +} + +// clampUint32Seconds maps an int64 seconds value into the uint32 range +// the daemon stores internally. Negative -> 0. >MaxUint32 -> MaxUint32. +// Defensive against Java callers that bypass UI validation. +func clampUint32Seconds(secs int64) uint32 { + if secs <= 0 { + return 0 + } + if secs > int64(^uint32(0)) { + return ^uint32(0) + } + return uint32(secs) +} + // Commit writes out the changes to the config file func (p *Preferences) Commit() error { _, err := profilemanager.UpdateOrCreateConfig(p.configInput) diff --git a/client/android/preferences_clamp_test.go b/client/android/preferences_clamp_test.go new file mode 100644 index 00000000000..b397aa8485d --- /dev/null +++ b/client/android/preferences_clamp_test.go @@ -0,0 +1,37 @@ +package android + +import ( + "math" + "testing" +) + +// Codex review: Preferences.SetXxxSeconds used to cast int64 directly +// to uint32, silently wrapping negatives into huge positives and +// truncating values >MaxUint32. Lock down the new clamp behavior. 
+func TestClampUint32Seconds(t *testing.T) { + maxU := uint32(math.MaxUint32) + tests := []struct { + name string + input int64 + want uint32 + }{ + {"zero", 0, 0}, + {"one", 1, 1}, + {"3h_typical", 10800, 10800}, + {"24h_typical", 86400, 86400}, + {"max_uint32_exact", int64(math.MaxUint32), maxU}, + {"max_uint32_plus_one_clamps", int64(math.MaxUint32) + 1, maxU}, + {"int64_max_clamps", math.MaxInt64, maxU}, + {"negative_one_clamps_to_zero", -1, 0}, + {"negative_huge_clamps_to_zero", -86400, 0}, + {"int64_min_clamps_to_zero", math.MinInt64, 0}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := clampUint32Seconds(tc.input) + if got != tc.want { + t.Errorf("clampUint32Seconds(%d) = %d, want %d", tc.input, got, tc.want) + } + }) + } +} diff --git a/client/cmd/debug.go b/client/cmd/debug.go index 2a8cdc88737..425dc2731f4 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -425,14 +425,28 @@ func generateDebugBundle(config *profilemanager.Config, recorder *peer.Status, c } } + deps := debug.GeneratorDependencies{ + InternalConfig: config, + StatusRecorder: recorder, + SyncResponse: syncResponse, + LogPath: logFilePath, + CPUProfile: nil, + } + + // Phase 3.7h (#5989): record server-pushed mode + timers in config.txt. 
+ if connectClient != nil { + if eng := connectClient.Engine(); eng != nil { + if cm := eng.ConnMgr(); cm != nil { + deps.ServerPushedConnectionMode = cm.ServerPushedMode().String() + deps.ServerPushedRelayTimeoutSec = cm.ServerPushedRelayTimeoutSecs() + deps.ServerPushedP2pTimeoutSec = cm.ServerPushedP2pTimeoutSecs() + deps.ServerPushedP2pRetryMaxSec = cm.ServerPushedP2pRetryMaxSecs() + } + } + } + bundleGenerator := debug.NewBundleGenerator( - debug.GeneratorDependencies{ - InternalConfig: config, - StatusRecorder: recorder, - SyncResponse: syncResponse, - LogPath: logFilePath, - CPUProfile: nil, - }, + deps, debug.BundleConfig{ IncludeSystemInfo: true, }, diff --git a/client/cmd/root.go b/client/cmd/root.go index 29d4328a1f7..a4e8e934976 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -39,6 +39,10 @@ const ( extraIFaceBlackListFlag = "extra-iface-blacklist" dnsRouteIntervalFlag = "dns-router-interval" enableLazyConnectionFlag = "enable-lazy-connection" + connectionModeFlag = "connection-mode" + relayTimeoutFlag = "relay-timeout" + p2pTimeoutFlag = "p2p-timeout" + p2pRetryMaxFlag = "p2p-retry-max" mtuFlag = "mtu" ) @@ -72,6 +76,10 @@ var ( anonymizeFlag bool dnsRouteInterval time.Duration lazyConnEnabled bool + connectionMode string + relayTimeoutSecs uint32 + p2pTimeoutSecs uint32 + p2pRetryMaxSecs uint32 mtu uint16 profilesDisabled bool updateSettingsDisabled bool @@ -192,6 +200,15 @@ func init() { upCmd.PersistentFlags().BoolVar(&rosenpassPermissive, rosenpassPermissiveFlag, false, "[Experimental] Enable Rosenpass in permissive mode to allow this peer to accept WireGuard connections without requiring Rosenpass functionality from peers that do not have Rosenpass enabled.") upCmd.PersistentFlags().BoolVar(&autoConnectDisabled, disableAutoConnectFlag, false, "Disables auto-connect feature. 
If enabled, then the client won't connect automatically when the service starts.") upCmd.PersistentFlags().BoolVar(&lazyConnEnabled, enableLazyConnectionFlag, false, "[Experimental] Enable the lazy connection feature. If enabled, the client will establish connections on-demand. Note: this setting may be overridden by management configuration.") + upCmd.PersistentFlags().StringVar(&connectionMode, connectionModeFlag, "", + "[Experimental] Peer connection mode: relay-forced, p2p, p2p-lazy, p2p-dynamic, or follow-server. "+ + "Overrides the server-pushed value when set. Use follow-server to clear a previously-set local override.") + upCmd.PersistentFlags().Uint32Var(&relayTimeoutSecs, relayTimeoutFlag, 0, + "[Experimental] Relay-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default).") + upCmd.PersistentFlags().Uint32Var(&p2pTimeoutSecs, p2pTimeoutFlag, 0, + "[Experimental] ICE-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default). Only effective in p2p-dynamic mode (Phase 2).") + upCmd.PersistentFlags().Uint32Var(&p2pRetryMaxSecs, p2pRetryMaxFlag, 0, + "[Experimental] Maximum ICE-failure-backoff interval in seconds. 0 = use server-pushed value (or built-in default 15 min). Effective in p2p-dynamic mode (Phase 3 of #5989).") } diff --git a/client/cmd/service.go b/client/cmd/service.go index 56d8a8726fa..f8e6e97fecd 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -57,6 +57,24 @@ func init() { installCmd.Flags().StringSliceVar(&serviceEnvVars, "service-env", nil, serviceEnvDesc) reconfigureCmd.Flags().StringSliceVar(&serviceEnvVars, "service-env", nil, serviceEnvDesc) + // Profile-level connection-mode + timeout flags. Same semantics as on + // `netbird up` but writeable at install time so server/headless + // installs can pre-seed the active profile before the daemon starts. 
+ // Same package-level vars are shared with upCmd; on `up` they take + // effect through setupConfig(), here we apply them once before + // installing the service so the daemon picks them up on first run. + for _, c := range []*cobra.Command{installCmd, reconfigureCmd} { + c.Flags().StringVar(&connectionMode, connectionModeFlag, "", + "[Experimental] Peer connection mode: relay-forced, p2p, p2p-lazy, p2p-dynamic, or follow-server. "+ + "Overrides the server-pushed value when set. Use follow-server to clear a previously-set local override.") + c.Flags().Uint32Var(&relayTimeoutSecs, relayTimeoutFlag, 0, + "[Experimental] Relay-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default).") + c.Flags().Uint32Var(&p2pTimeoutSecs, p2pTimeoutFlag, 0, + "[Experimental] ICE-worker idle timeout in seconds. 0 = use server-pushed value. Only effective in p2p-dynamic mode.") + c.Flags().Uint32Var(&p2pRetryMaxSecs, p2pRetryMaxFlag, 0, + "[Experimental] Maximum ICE-failure-backoff interval in seconds. 0 = use server-pushed value (or built-in default 15 min).") + } + rootCmd.AddCommand(serviceCmd) } diff --git a/client/cmd/service_installer.go b/client/cmd/service_installer.go index 2d45fa063d8..449c910ff51 100644 --- a/client/cmd/service_installer.go +++ b/client/cmd/service_installer.go @@ -15,6 +15,7 @@ import ( "github.com/kardianos/service" "github.com/spf13/cobra" + "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/util" ) @@ -131,6 +132,12 @@ var installCmd = &cobra.Command{ cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) } + // Persist any profile-level connection-mode/timeout flags that + // were explicitly set so the daemon picks them up on first start. 
+ if err := applyConnectionModeFlagsToProfile(cmd); err != nil { + cmd.PrintErrf("Warning: failed to persist connection-mode flags: %v\n", err) + } + svcConfig, err := createServiceConfigForInstall() if err != nil { return err @@ -157,6 +164,52 @@ var installCmd = &cobra.Command{ }, } +// applyConnectionModeFlagsToProfile writes the connection-mode + +// timeout flags into the active profile's config file so the daemon +// will use them on its next startup. Only fields whose flag was +// explicitly set are touched; missing flags leave the existing +// profile values intact. Used by install + reconfigure so headless +// deployments can pre-seed everything in a single command. +func applyConnectionModeFlagsToProfile(cmd *cobra.Command) error { + anyChanged := false + for _, name := range []string{connectionModeFlag, relayTimeoutFlag, p2pTimeoutFlag, p2pRetryMaxFlag} { + if f := cmd.Flag(name); f != nil && f.Changed { + anyChanged = true + break + } + } + if !anyChanged { + return nil + } + + cfgPath := profilemanager.DefaultConfigPath + if configPath != "" { + cfgPath = configPath + } + if cfgPath == "" { + return fmt.Errorf("default config path is not set on this platform; pass --config") + } + + ic := profilemanager.ConfigInput{ConfigPath: cfgPath} + if cmd.Flag(connectionModeFlag).Changed { + ic.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + ic.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + ic.P2pTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + ic.P2pRetryMaxSeconds = &p2pRetryMaxSecs + } + if _, err := profilemanager.UpdateOrCreateConfig(ic); err != nil { + return fmt.Errorf("write profile %s: %w", cfgPath, err) + } + cmd.Println("connection-mode/timeout flags persisted to profile:", cfgPath) + return nil +} + var uninstallCmd = &cobra.Command{ Use: "uninstall", Short: "uninstalls NetBird service from system", @@ -207,6 +260,10 @@ This command will temporarily 
stop the service, update its configuration, and re cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) } + if err := applyConnectionModeFlagsToProfile(cmd); err != nil { + cmd.PrintErrf("Warning: failed to persist connection-mode flags: %v\n", err) + } + wasRunning, err := isServiceRunning() if err != nil && !errors.Is(err, ErrGetServiceStatus) { return fmt.Errorf("check service status: %w", err) diff --git a/client/cmd/status.go b/client/cmd/status.go index c35a06eb3c6..3c0de9a0593 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -79,6 +79,15 @@ func statusFunc(cmd *cobra.Command, args []string) error { return err } + // Phase 3.7h (#5989): also fetch GetConfig so the status output can show + // the effective connection-mode + lifecycle timers and the values most + // recently pushed by management. Failure is non-fatal — the rest of the + // status output is still useful even if these fields end up zero. + cfgResp, cfgErr := getDaemonConfig(ctx) + if cfgErr != nil { + cfgResp = &proto.GetConfigResponse{} + } + status := resp.GetStatus() needsAuth := status == string(internal.StatusNeedsLogin) || status == string(internal.StatusLoginFailed) || @@ -117,6 +126,15 @@ func statusFunc(cmd *cobra.Command, args []string) error { IPsFilter: ipsFilterMap, ConnectionTypeFilter: connectionTypeFilter, ProfileName: profName, + + ConnectionMode: cfgResp.GetConnectionMode(), + RelayTimeoutSeconds: cfgResp.GetRelayTimeoutSeconds(), + P2pTimeoutSeconds: cfgResp.GetP2PTimeoutSeconds(), + P2pRetryMaxSeconds: cfgResp.GetP2PRetryMaxSeconds(), + ServerPushedConnectionMode: cfgResp.GetServerPushedConnectionMode(), + ServerPushedRelayTimeoutSeconds: cfgResp.GetServerPushedRelayTimeoutSeconds(), + ServerPushedP2pTimeoutSeconds: cfgResp.GetServerPushedP2PTimeoutSeconds(), + ServerPushedP2pRetryMaxSeconds: cfgResp.GetServerPushedP2PRetryMaxSeconds(), }) var statusOutputString string switch { @@ -157,6 +175,22 @@ func getStatus(ctx context.Context, 
fullPeerStatus bool, shouldRunProbes bool) ( return resp, nil } +// getDaemonConfig fetches the daemon's effective and server-pushed config so +// the status command can render Phase-3.7h connection-mode + timer values. +func getDaemonConfig(ctx context.Context) (*proto.GetConfigResponse, error) { + conn, err := DialClientGRPCServer(ctx, daemonAddr) + if err != nil { + return nil, fmt.Errorf("dial daemon: %w", err) + } + defer conn.Close() + + resp, err := proto.NewDaemonServiceClient(conn).GetConfig(ctx, &proto.GetConfigRequest{}) + if err != nil { + return nil, fmt.Errorf("getConfig: %w", err) + } + return resp, nil +} + func parseFilters() error { switch strings.ToLower(statusFilter) { case "", "idle", "connecting", "connected": diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index c24965e8d82..2bcae0717fe 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/job" + "github.com/netbirdio/netbird/management/server/peer_connections" clientProto "github.com/netbirdio/netbird/client/proto" client "github.com/netbirdio/netbird/client/server" @@ -135,7 +136,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil, nil, peer_connections.NewMemoryStore(5*time.Minute), peer_connections.NewSnapshotRouter()) if err != nil { t.Fatal(err) } diff --git a/client/cmd/up.go b/client/cmd/up.go 
index f4136cb2343..cba3edddee9 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -439,6 +439,19 @@ func setupSetConfigReq(customDNSAddressConverted []byte, cmd *cobra.Command, pro req.LazyConnectionEnabled = &lazyConnEnabled } + if cmd.Flag(connectionModeFlag).Changed { + req.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + req.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + req.P2PTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + req.P2PRetryMaxSeconds = &p2pRetryMaxSecs + } + return &req } @@ -555,6 +568,19 @@ func setupConfig(customDNSAddressConverted []byte, cmd *cobra.Command, configFil if cmd.Flag(enableLazyConnectionFlag).Changed { ic.LazyConnectionEnabled = &lazyConnEnabled } + + if cmd.Flag(connectionModeFlag).Changed { + ic.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + ic.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + ic.P2pTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + ic.P2pRetryMaxSeconds = &p2pRetryMaxSecs + } return &ic, nil } @@ -669,6 +695,19 @@ func setupLoginRequest(providedSetupKey string, customDNSAddressConverted []byte if cmd.Flag(enableLazyConnectionFlag).Changed { loginRequest.LazyConnectionEnabled = &lazyConnEnabled } + + if cmd.Flag(connectionModeFlag).Changed { + loginRequest.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + loginRequest.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + loginRequest.P2PTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + loginRequest.P2PRetryMaxSeconds = &p2pRetryMaxSecs + } return &loginRequest, nil } diff --git a/client/iface/bind/activity.go b/client/iface/bind/activity.go index 57862e3d144..ff69aec9cfb 100644 --- a/client/iface/bind/activity.go +++ b/client/iface/bind/activity.go @@ -16,6 +16,7 @@ 
const ( ) type PeerRecord struct { + PublicKey string Address netip.AddrPort LastActivity atomic.Int64 // UnixNano timestamp } @@ -24,6 +25,12 @@ type ActivityRecorder struct { mu sync.RWMutex peers map[string]*PeerRecord // publicKey to PeerRecord map addrToPeer map[netip.AddrPort]*PeerRecord // address to PeerRecord map + // onActivity, if set, is invoked once per saveFrequency-window per + // peer when transport activity is observed. Used by the engine's + // connMgr to fast-path ICE re-attach for peers that fell back to + // relay-only on iceTimeout (Codex review 2026-05-05). Rate-limited + // piggybacks the existing CAS to avoid a hot-path allocation. + onActivity func(pubKey string) } func NewActivityRecorder() *ActivityRecorder { @@ -33,6 +40,16 @@ func NewActivityRecorder() *ActivityRecorder { } } +// SetOnActivity registers a callback invoked at most once per +// saveFrequency (5s) per peer when transport activity is recorded. +// Pass nil to clear. Safe to call before the recorder starts seeing +// traffic. 
+func (r *ActivityRecorder) SetOnActivity(cb func(pubKey string)) { + r.mu.Lock() + r.onActivity = cb + r.mu.Unlock() +} + // GetLastActivities returns a snapshot of peer last activity func (r *ActivityRecorder) GetLastActivities() map[string]monotime.Time { r.mu.RLock() @@ -58,7 +75,8 @@ func (r *ActivityRecorder) UpsertAddress(publicKey string, address netip.AddrPor record.Address = address } else { record = &PeerRecord{ - Address: address, + PublicKey: publicKey, + Address: address, } record.LastActivity.Store(int64(monotime.Now())) r.peers[publicKey] = record @@ -80,6 +98,7 @@ func (r *ActivityRecorder) Remove(publicKey string) { func (r *ActivityRecorder) record(address netip.AddrPort) { r.mu.RLock() record, ok := r.addrToPeer[address] + cb := r.onActivity r.mu.RUnlock() if !ok { log.Warnf("could not find record for address %s", address) @@ -92,5 +111,12 @@ func (r *ActivityRecorder) record(address netip.AddrPort) { return } - _ = record.LastActivity.CompareAndSwap(last, now) + if record.LastActivity.CompareAndSwap(last, now) && cb != nil { + // Fire only on the actual save edge (CAS success). Prevents + // duplicate events when many goroutines race on the same packet + // burst. Callback runs synchronously on the WG read/write + // goroutine -- handler MUST be cheap or self-defer to its own + // goroutine. + cb(record.PublicKey) + } } diff --git a/client/iface/device/endpoint_manager.go b/client/iface/device/endpoint_manager.go index b53888baab6..c7b1fe0731a 100644 --- a/client/iface/device/endpoint_manager.go +++ b/client/iface/device/endpoint_manager.go @@ -3,6 +3,8 @@ package device import ( "net" "net/netip" + + "github.com/netbirdio/netbird/client/iface/bind" ) // EndpointManager manages fake IP to connection mappings for userspace bind implementations. 
@@ -10,4 +12,11 @@ import ( type EndpointManager interface { SetEndpoint(fakeIP netip.Addr, conn net.Conn) RemoveEndpoint(fakeIP netip.Addr) + // ActivityRecorder exposes the per-bind ActivityRecorder so the + // engine can wire its OnActivity callback (Codex review 2026-05-05, + // fast-path Relay -> P2P upgrade trigger). Always non-nil on + // userspace binds. Kernel-mode WG returns nil from GetICEBind so + // callers MUST nil-check the EndpointManager itself before + // dereferencing. + ActivityRecorder() *bind.ActivityRecorder } diff --git a/client/internal/conn_mgr.go b/client/internal/conn_mgr.go index 112559132a1..97517146eb2 100644 --- a/client/internal/conn_mgr.go +++ b/client/internal/conn_mgr.go @@ -14,6 +14,8 @@ import ( "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/peerstore" "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/connectionmode" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) // ConnMgr coordinates both lazy connections (established on-demand) and permanent peer connections. @@ -28,9 +30,44 @@ type ConnMgr struct { peerStore *peerstore.Store statusRecorder *peer.Status iface lazyconn.WGIface - enabledLocally bool rosenpassEnabled bool + // Resolved values used to drive lifecycle decisions. Updated when + // the management server pushes a new PeerConfig. + mode connectionmode.Mode + relayTimeoutSecs uint32 + // Phase 2 (#5989): ICE-only inactivity timeout (seconds). Used in + // ModeP2PDynamic to teardown the ICE worker without affecting the + // relay tunnel. 0 = ICE never times out. + p2pTimeoutSecs uint32 + // Phase 3 (#5989): maximum seconds between P2P retry attempts. + // 0 means the daemon uses its built-in default. + p2pRetryMaxSecs uint32 + + // Raw inputs kept so we can re-resolve when server-pushed value changes. 
+ envMode connectionmode.Mode + envRelayTimeout uint32 + cfgMode connectionmode.Mode + cfgRelayTimeout uint32 + cfgP2pTimeout uint32 + cfgP2pRetryMax uint32 + + // spMu protects all serverPushed* fields below. Written in + // UpdatedRemotePeerConfig (NetworkMap goroutine), read by + // ServerPushed*() accessors (daemon-RPC GetConfig goroutine). + spMu sync.RWMutex + + // serverPushedMode is the ConnectionMode value that was last received + // from the management server's PeerConfig (independent of any local + // env/cfg override). Updated in UpdatedRemotePeerConfig. Used by the + // Android UI to display "Follow server (currently: <mode>)" in the + // connection-mode override dropdown so users can see what they would + // inherit if they leave the override on "Follow server". + serverPushedMode connectionmode.Mode + serverPushedRelayTimeoutSecs uint32 + serverPushedP2pTimeoutSecs uint32 + serverPushedP2pRetryMaxSecs uint32 + lazyConnMgr *manager.Manager + wg sync.WaitGroup @@ -39,72 +76,279 @@ } func NewConnMgr(engineConfig *EngineConfig, statusRecorder *peer.Status, peerStore *peerstore.Store, iface lazyconn.WGIface) *ConnMgr { - e := &ConnMgr{ + envMode, envRelayTimeout := peer.ResolveModeFromEnv() + + // First-pass resolution without server input -- updated later when + // the first NetworkMap arrives via UpdatedRemotePeerConfig. 
+ mode, relayTimeout, p2pTimeout, p2pRetryMax := resolveConnectionMode( + envMode, envRelayTimeout, + engineConfig.ConnectionMode, engineConfig.RelayTimeoutSeconds, + engineConfig.P2pTimeoutSeconds, + engineConfig.P2pRetryMaxSeconds, + nil, + ) + + return &ConnMgr{ peerStore: peerStore, statusRecorder: statusRecorder, iface: iface, rosenpassEnabled: engineConfig.RosenpassEnabled, + mode: mode, + relayTimeoutSecs: relayTimeout, + p2pTimeoutSecs: p2pTimeout, + p2pRetryMaxSecs: p2pRetryMax, + envMode: envMode, + envRelayTimeout: envRelayTimeout, + cfgMode: engineConfig.ConnectionMode, + cfgRelayTimeout: engineConfig.RelayTimeoutSeconds, + cfgP2pTimeout: engineConfig.P2pTimeoutSeconds, + cfgP2pRetryMax: engineConfig.P2pRetryMaxSeconds, } - if engineConfig.LazyConnectionEnabled || lazyconn.IsLazyConnEnabledByEnv() { - e.enabledLocally = true +} + +// resolveConnectionMode applies the spec-section-4.1 precedence chain: +// 1. client env (already resolved by caller via peer.ResolveModeFromEnv) +// 2. client config (from profile, including the FollowServer sentinel) +// 3. server-pushed PeerConfig.ConnectionMode (with UNSPECIFIED -> +// legacy LazyConnectionEnabled fallback) +// +// Returns the resolved Mode, the resolved relay-timeout in seconds, and +// the resolved p2p-timeout in seconds. 0 for either timeout means the +// caller should use its built-in default. 
+func resolveConnectionMode( + envMode connectionmode.Mode, + envRelayTimeout uint32, + cfgMode connectionmode.Mode, + cfgRelayTimeout uint32, + cfgP2pTimeout uint32, + cfgP2pRetryMax uint32, + serverPC *mgmProto.PeerConfig, +) (connectionmode.Mode, uint32, uint32, uint32) { + mode := envMode + if mode == connectionmode.ModeUnspecified { + if cfgMode != connectionmode.ModeUnspecified && cfgMode != connectionmode.ModeFollowServer { + mode = cfgMode + } + } + if mode == connectionmode.ModeUnspecified { + if serverPC != nil { + serverMode := connectionmode.FromProto(serverPC.GetConnectionMode()) + if serverMode != connectionmode.ModeUnspecified { + mode = serverMode + } else { + mode = connectionmode.ResolveLegacyLazyBool(serverPC.GetLazyConnectionEnabled()) + } + } else { + mode = connectionmode.ModeP2P // safe default when nothing at all is known + } + } + + // Relay-timeout precedence (analog). + relay := envRelayTimeout + if relay == 0 { + relay = cfgRelayTimeout } - return e + if relay == 0 && serverPC != nil { + relay = serverPC.GetRelayTimeoutSeconds() + } + + // P2P-timeout precedence: client config wins over server push. No env + // var in Phase 2; reserved for Phase 3. + p2p := cfgP2pTimeout + if p2p == 0 && serverPC != nil { + p2p = serverPC.GetP2PTimeoutSeconds() + } + + // P2pRetryMax resolution (analogous to p2p timeout): + // client-config wins over server-pushed value (0 = not set). + p2pRetryMax := cfgP2pRetryMax + if p2pRetryMax == 0 && serverPC != nil { + p2pRetryMax = serverPC.GetP2PRetryMaxSeconds() + } + + return mode, relay, p2p, p2pRetryMax } -// Start initializes the connection manager and starts the lazy connection manager if enabled by env var or cmd line option. +// Start initializes the connection manager. The lazy/dynamic connection +// manager is brought up immediately when the resolved Mode is P2PLazy +// or P2PDynamic. Other modes keep the manager dormant; it can still be +// activated later via UpdatedRemotePeerConfig. 
func (e *ConnMgr) Start(ctx context.Context) { if e.lazyConnMgr != nil { - log.Errorf("lazy connection manager is already started") + log.Errorf("lazy/dynamic connection manager is already started") return } - - if !e.enabledLocally { - log.Infof("lazy connection manager is disabled") + if !modeUsesLazyMgr(e.mode) { + log.Infof("lazy/dynamic connection manager is disabled (mode=%s)", e.mode) return } - if e.rosenpassEnabled { - log.Warnf("rosenpass connection manager is enabled, lazy connection manager will not be started") + log.Warnf("rosenpass enabled, lazy/dynamic connection manager will not be started") return } - e.initLazyManager(ctx) - e.statusRecorder.UpdateLazyConnection(true) + e.startModeSideEffects() } -// UpdatedRemoteFeatureFlag is called when the remote feature flag is updated. -// If enabled, it initializes the lazy connection manager and start it. Do not need to call Start() again. -// If disabled, then it closes the lazy connection manager and open the connections to all peers. -func (e *ConnMgr) UpdatedRemoteFeatureFlag(ctx context.Context, enabled bool) error { - // do not disable lazy connection manager if it was enabled by env var - if e.enabledLocally { - return nil +// modeUsesLazyMgr is true for the modes whose lifecycle is driven by the +// lazyconn.Manager (which now hosts the two-timer inactivity manager +// since Phase 2). Eager modes (p2p, relay-forced) do not need it. +func modeUsesLazyMgr(m connectionmode.Mode) bool { + return m == connectionmode.ModeP2PLazy || m == connectionmode.ModeP2PDynamic +} + +// startModeSideEffects flips the per-mode goroutines and status flags +// that need to follow a successful initLazyManager. Called by Start() +// and by the management-push transition path. +func (e *ConnMgr) startModeSideEffects() { + // Both lazy AND dynamic are "lazy" from the status-recorder's + // perspective (peers are not eagerly opened; they wait for activity). 
+ // The "Lazy connection: true/false" line in `netbird status` reflects + // this user-visible distinction, not the internal flavor. + if e.mode == connectionmode.ModeP2PLazy || e.mode == connectionmode.ModeP2PDynamic { + e.statusRecorder.UpdateLazyConnection(true) + } + if e.mode == connectionmode.ModeP2PDynamic { + e.wg.Add(1) + go func() { + defer e.wg.Done() + e.runDynamicInactivityLoop(e.lazyCtx) + }() } +} - if enabled { - // if the lazy connection manager is already started, do not start it again - if e.lazyConnMgr != nil { - return nil +// runDynamicInactivityLoop reads from the two-timer inactivity channels +// exposed by the inactivity.Manager and dispatches per-peer teardown. +// +// ICEInactiveChan: detach the ICE worker for each listed peer; the +// relay tunnel is left running so traffic still flows. +// +// RelayInactiveChan: close the whole connection. The activity-detector +// will reopen it when the next outbound packet arrives. +// +// Only meaningful in p2p-dynamic mode; in p2p-lazy the iceTimeout is 0 +// and ICEInactiveChan never fires, so the loop is a passthrough. +func (e *ConnMgr) runDynamicInactivityLoop(ctx context.Context) { + if e.lazyConnMgr == nil { + return + } + im := e.lazyConnMgr.InactivityManager() + if im == nil { + return + } + log.Infof("p2p-dynamic inactivity loop started (iceTimeout=%ds, relayTimeout=%ds)", e.p2pTimeoutSecs, e.relayTimeoutSecs) + defer log.Infof("p2p-dynamic inactivity loop stopped") + for { + select { + case <-ctx.Done(): + return + case peers := <-im.ICEInactiveChan(): + for peerKey := range peers { + if err := e.DetachICEForPeer(peerKey); err != nil { + log.Warnf("DetachICEForPeer(%s): %v", peerKey, err) + } + } + case peers := <-im.RelayInactiveChan(): + for peerKey := range peers { + if conn, ok := e.peerStore.PeerConn(peerKey); ok { + conn.Log.Infof("relay-inactivity timeout, closing peer connection") + // Lazy-suspend: keep the WG peer entry so routed- + // subnet AllowedIPs (e.g. 
192.168.91.0/24 via this + // routing peer) survive the wake/sleep cycle. + // Otherwise routed traffic to the prefix would not + // match any peer and silently drop until the next + // reconcile (see docs/bugs/2026-05-04-...md). + conn.Close(false, true) + } + } } + } +} - if e.rosenpassEnabled { - log.Infof("rosenpass connection manager is enabled, lazy connection manager will not be started") - return nil +// UpdatedRemotePeerConfig is called when the management server pushes a +// new PeerConfig. Re-resolves the effective mode through the precedence +// chain and starts/stops the lazy manager accordingly. +func (e *ConnMgr) UpdatedRemotePeerConfig(ctx context.Context, pc *mgmProto.PeerConfig) error { + // Capture the raw server-pushed values before resolution so the UI + // can surface them independently of any local override. + if pc != nil { + serverMode := connectionmode.FromProto(pc.GetConnectionMode()) + if serverMode == connectionmode.ModeUnspecified { + serverMode = connectionmode.ResolveLegacyLazyBool(pc.GetLazyConnectionEnabled()) } + e.spMu.Lock() + e.serverPushedMode = serverMode + e.serverPushedRelayTimeoutSecs = pc.GetRelayTimeoutSeconds() + e.serverPushedP2pTimeoutSecs = pc.GetP2PTimeoutSeconds() + e.serverPushedP2pRetryMaxSecs = pc.GetP2PRetryMaxSeconds() + e.spMu.Unlock() + } - log.Warnf("lazy connection manager is enabled by management feature flag") - e.initLazyManager(ctx) - e.statusRecorder.UpdateLazyConnection(true) - return e.addPeersToLazyConnManager() - } else { - if e.lazyConnMgr == nil { - return nil - } - log.Infof("lazy connection manager is disabled by management feature flag") + newMode, newRelay, newP2P, newP2pRetry := resolveConnectionMode( + e.envMode, e.envRelayTimeout, e.cfgMode, e.cfgRelayTimeout, + e.cfgP2pTimeout, e.cfgP2pRetryMax, pc, + ) + + if newMode == e.mode && newRelay == e.relayTimeoutSecs && + newP2P == e.p2pTimeoutSecs && newP2pRetry == e.p2pRetryMaxSecs { + return nil + } + prev := e.mode + e.mode = newMode + 
e.relayTimeoutSecs = newRelay + e.p2pTimeoutSecs = newP2P + e.p2pRetryMaxSecs = newP2pRetry + e.propagateP2pRetryMaxToConns() + + wasManaged := modeUsesLazyMgr(prev) + isManaged := modeUsesLazyMgr(newMode) + modeChanged := prev != newMode + + if modeChanged && wasManaged && !isManaged { + log.Infof("lazy/dynamic connection manager disabled by management push (mode=%s)", newMode) e.closeManager(ctx) e.statusRecorder.UpdateLazyConnection(false) return nil } + + if modeChanged && wasManaged && isManaged { + // Switching between lazy and dynamic at runtime: tear down the + // existing manager so initLazyManager picks up the new timeouts. + log.Infof("lazy/dynamic mode change %s -> %s, restarting manager", prev, newMode) + e.closeManager(ctx) + e.statusRecorder.UpdateLazyConnection(false) + } + + if isManaged && e.lazyConnMgr == nil { + if e.rosenpassEnabled { + log.Warnf("rosenpass enabled, ignoring lazy/dynamic mode push") + return nil + } + log.Infof("lazy/dynamic connection manager enabled by management push (mode=%s)", newMode) + e.initLazyManager(ctx) + e.startModeSideEffects() + // Phase 3.7i: when management activates lazy/dynamic mode at + // runtime we must reset all existing peer connections through + // the lazy/idle entry. The previous AddActivePeers path kept + // every already-open WireGuard tunnel running and only started + // the inactivity timers from "now" -- callers expected the new + // mode to apply immediately ("Idle until traffic"), not "stay + // open until 3 hours from now". Brief packet loss (~1-2 s per + // peer while the tunnel is rebuilt) is acceptable; mode changes + // are rare and almost always intentional. + return e.resetPeersToLazyIdle(ctx) + } + return nil +} + +// UpdatedRemoteFeatureFlag is the legacy entry point that only knows the +// boolean LazyConnectionEnabled field. Kept as a thin shim that builds a +// synthetic PeerConfig and delegates to UpdatedRemotePeerConfig. 
+// +// Deprecated: callers should switch to UpdatedRemotePeerConfig and pass +// the real PeerConfig so the new ConnectionMode + timeouts propagate. +func (e *ConnMgr) UpdatedRemoteFeatureFlag(ctx context.Context, enabled bool) error { + return e.UpdatedRemotePeerConfig(ctx, &mgmProto.PeerConfig{LazyConnectionEnabled: enabled}) } // UpdateRouteHAMap updates the route HA mappings in the lazy connection manager @@ -163,6 +407,11 @@ func (e *ConnMgr) AddPeerConn(ctx context.Context, peerKey string, conn *peer.Co return true } + // Wire WG-timeout recovery so the peer is pushed back to lazy-idle + // (activity listener restarted) when WireGuard handshakes time out. + // Closes over peerKey so the callback is independent of conn state. + conn.SetOnWGTimeoutRecover(func() { e.RecoverPeerToIdle(peerKey) }) + if !e.isStartedWithLazyMgr() { if err := conn.Open(ctx); err != nil { conn.Log.Errorf("failed to open connection: %v", err) @@ -210,7 +459,9 @@ func (e *ConnMgr) RemovePeerConn(peerKey string) { if !ok { return } - defer conn.Close(false) + // Permanent removal: drop the WG peer entry too. If we kept it the + // stale entry would linger in WG until the next full reconcile. + defer conn.Close(false, false) if !e.isStartedWithLazyMgr() { return @@ -230,19 +481,113 @@ func (e *ConnMgr) ActivatePeer(ctx context.Context, conn *peer.Conn) { conn.Log.Errorf("failed to open connection: %v", err) } } + + // p2p-dynamic: re-attach ICE on EVERY signal trigger, not only on + // the lazy-manager's first activity edge. The runDynamicInactivityLoop + // path (DetachICEForPeer when iceTimeout fires) leaves the peer in an + // "inactivity-with-ICE-detached" sub-state that the lazy manager does + // not represent. Without this re-arm, subsequent remote OFFERs would + // reach handshaker.Listen() with iceListener==nil and be silently + // dropped, leaving the peer stuck on relay even though both sides + // are signaling normally. 
AttachICE is idempotent (no-op if listener + // already attached) and honors iceBackoff.IsSuspended() so the + // failure-backoff is not bypassed. + if e.mode == connectionmode.ModeP2PDynamic { + if err := conn.AttachICE(); err != nil { + conn.Log.Warnf("AttachICE on signal activity: %v", err) + } + } +} + +// deactivateAction selects what DeactivatePeer should do when the remote +// peer signals GO_IDLE. The dispatch is a pure function of the locally +// resolved connection mode. +type deactivateAction int + +const ( + deactivateNoop deactivateAction = iota + deactivateLazy + deactivateICE +) + +// deactivatePeerAction returns the per-mode deactivation rule. Eager +// modes (p2p, relay-forced, unspecified) ignore GO_IDLE because they +// are meant to keep tunnels always-on. p2p-lazy delegates to the lazy +// connection manager so the whole tunnel is torn down. p2p-dynamic +// detaches only the ICE worker so the relay tunnel stays up. +func (e *ConnMgr) deactivatePeerAction() deactivateAction { + switch e.mode { + case connectionmode.ModeP2PLazy: + return deactivateLazy + case connectionmode.ModeP2PDynamic: + return deactivateICE + default: + return deactivateNoop + } } -// DeactivatePeer deactivates a peer connection in the lazy connection manager. -// If locally the lazy connection is disabled, we force the peer connection open. +// DeactivatePeer is invoked when the remote peer signals GO_IDLE. The +// behavior is per-mode (see deactivatePeerAction). Phase 2 fix for the +// lazy/eager mismatch in #5989: previously this method silently no-op'd +// whenever the local manager was not in lazy mode, so a remote lazy +// peer's GO_IDLE was effectively dropped and the eager local end kept +// the peer awake. 
func (e *ConnMgr) DeactivatePeer(conn *peer.Conn) { - if !e.isStartedWithLazyMgr() { + switch e.deactivatePeerAction() { + case deactivateLazy: + if !e.isStartedWithLazyMgr() { + return + } + conn.Log.Infof("closing peer connection: remote peer initiated inactive, idle lazy state and sent GOAWAY") + e.lazyConnMgr.DeactivatePeer(conn.ConnID()) + case deactivateICE: + conn.Log.Infof("detaching ICE worker: remote peer signaled GO_IDLE (p2p-dynamic)") + if err := e.DetachICEForPeer(conn.GetKey()); err != nil { + conn.Log.Warnf("DetachICEForPeer failed: %v", err) + } + case deactivateNoop: + // Eager modes keep the tunnel up unconditionally. return } +} - conn.Log.Infof("closing peer connection: remote peer initiated inactive, idle lazy state and sent GOAWAY") +// RecoverPeerToIdle pushes a peer back into the lazy manager's +// activity-listening idle state after the local WireGuard handshake +// has timed out. Without this, the peer stays stuck in "Connecting" +// forever (lazy mgr keeps it in active set with no activity listener, +// so subsequent local traffic is silently dropped). Codex follow-up. +// +// Safe to call when the lazy mgr is disabled or the peer is unknown: +// both cases short-circuit silently. The lazy mgr's DeactivatePeer +// also ignores peers that are not in the active state, so duplicate +// invocations (e.g. WG timeout twice) are no-ops. +func (e *ConnMgr) RecoverPeerToIdle(peerKey string) { + if !e.isStartedWithLazyMgr() { + return + } + conn, ok := e.peerStore.PeerConn(peerKey) + if !ok { + return + } + conn.Log.Infof("WG timeout recovery: pushing peer back to lazy-idle (activity listener will rearm)") e.lazyConnMgr.DeactivatePeer(conn.ConnID()) } +// DetachICEForPeer looks up the Conn for peerKey and tears down its +// ICE worker without touching the relay tunnel. 
Used by: +// - DeactivatePeer when the remote peer sends GO_IDLE (p2p-dynamic) +// - the inactivity manager when the iceTimeout elapses (wired in +// engine.go runDynamicInactivityLoop) +// +// Missing peers are not an error; they may have been removed concurrently. +func (e *ConnMgr) DetachICEForPeer(peerKey string) error { + conn, ok := e.peerStore.PeerConn(peerKey) + if !ok { + return nil + } + return conn.DetachICE() +} + func (e *ConnMgr) Close() { if !e.isStartedWithLazyMgr() { return @@ -257,6 +602,12 @@ func (e *ConnMgr) initLazyManager(engineCtx context.Context) { cfg := manager.Config{ InactivityThreshold: inactivityThresholdEnv(), } + if e.relayTimeoutSecs > 0 { + cfg.RelayInactivityThreshold = time.Duration(e.relayTimeoutSecs) * time.Second + } + if e.mode == connectionmode.ModeP2PDynamic && e.p2pTimeoutSecs > 0 { + cfg.ICEInactivityThreshold = time.Duration(e.p2pTimeoutSecs) * time.Second + } e.lazyConnMgr = manager.NewManager(cfg, engineCtx, e.peerStore, e.iface) e.lazyCtx, e.lazyCtxCancel = context.WithCancel(engineCtx) @@ -268,6 +619,34 @@ func (e *ConnMgr) initLazyManager(engineCtx context.Context) { }() } +// propagateP2pRetryMaxToConns iterates all active Conn instances and +// updates their iceBackoff.SetMaxBackoff. Called when the server pushes +// a new value via UpdatedRemotePeerConfig. Phase 3 of #5989. +func (e *ConnMgr) propagateP2pRetryMaxToConns() { + const sentinelDisabled = ^uint32(0) + v := e.p2pRetryMaxSecs + var d time.Duration + switch v { + case sentinelDisabled: + d = 0 // user-explicit disable + case 0: + d = peer.DefaultP2PRetryMax // server NULL -> use daemon default + default: + d = time.Duration(v) * time.Second + } + for _, peerKey := range e.peerStore.PeersPubKey() { + if conn, ok := e.peerStore.PeerConn(peerKey); ok { + conn.SetIceBackoffMax(d) + } + } +} + +// addPeersToLazyConnManager is currently unused (callers were migrated +// to per-peer activation in Phase 2 of #5989). 
Kept for reference and +// for the eventual full-batch wakeup path; revisit when the lazyconn +// manager grows a snapshot-import API. +// +//nolint:unused // see comment above func (e *ConnMgr) addPeersToLazyConnManager() error { peers := e.peerStore.PeersPubKey() lazyPeerCfgs := make([]lazyconn.PeerConfig, 0, len(peers)) @@ -291,6 +670,68 @@ func (e *ConnMgr) addPeersToLazyConnManager() error { return e.lazyConnMgr.AddActivePeers(lazyPeerCfgs) } +// resetPeersToLazyIdle closes every currently-open peer connection and +// re-registers it via the standard AddPeer (idle) entry of the lazy +// manager. Used when management activates lazy/dynamic mode at runtime: +// without this, AddActivePeers would keep all existing tunnels running +// until their inactivity timers fired, contradicting the user-visible +// promise of lazy/dynamic ("idle until traffic"). +// +// Peers with daemon versions that don't support lazy connection, peers +// on the exclude list, and any AddPeer error fall back to eager Open() +// to preserve current behaviour for those edge cases. Net effect for +// the common case: every supported peer flips from Connected -> Idle +// and waits for the next outbound payload packet. +func (e *ConnMgr) resetPeersToLazyIdle(ctx context.Context) error { + for _, peerID := range e.peerStore.PeersPubKey() { + peerConn, ok := e.peerStore.PeerConn(peerID) + if !ok { + log.Warnf("failed to find peer conn for peerID: %s", peerID) + continue + } + + // Tear the tunnel down. signalToRemote=true so the remote peer + // also drops its half (otherwise it would keep the tunnel half- + // open until its own ICE backoff fired). keepWgPeer=false: this + // is a mode-change full reopen, not a lazy-suspend; the peer + // will be re-Opened (or re-AddPeerConn'd) right below with a + // fresh AllowedIP set from the new mode's PeerConfig. 
+ peerConn.Close(true, false) + + if !lazyconn.IsSupported(peerConn.AgentVersionString()) { + peerConn.Log.Warnf("peer does not support lazy connection (%s), opening permanent connection after mode reset", peerConn.AgentVersionString()) + if err := peerConn.Open(ctx); err != nil { + peerConn.Log.Errorf("failed to re-open connection after mode reset: %v", err) + } + continue + } + + lazyPeerCfg := lazyconn.PeerConfig{ + PublicKey: peerID, + AllowedIPs: peerConn.WgConfig().AllowedIps, + PeerConnID: peerConn.ConnID(), + Log: peerConn.Log, + } + excluded, err := e.lazyConnMgr.AddPeer(lazyPeerCfg) + if err != nil { + peerConn.Log.Errorf("failed to add peer to lazy conn manager during mode reset: %v", err) + if err := peerConn.Open(ctx); err != nil { + peerConn.Log.Errorf("failed to re-open connection after AddPeer error: %v", err) + } + continue + } + if excluded { + peerConn.Log.Infof("peer is on lazy conn manager exclude list, opening connection after mode reset") + if err := peerConn.Open(ctx); err != nil { + peerConn.Log.Errorf("failed to re-open excluded peer after mode reset: %v", err) + } + continue + } + peerConn.Log.Infof("peer reset to idle by lazy/dynamic mode change") + } + return nil +} + func (e *ConnMgr) closeManager(ctx context.Context) { if e.lazyConnMgr == nil { return @@ -309,6 +750,79 @@ func (e *ConnMgr) isStartedWithLazyMgr() bool { return e.lazyConnMgr != nil && e.lazyCtxCancel != nil } +// Mode returns the currently resolved connection mode. Used by the engine +// when constructing per-peer connections (Phase 1 forwards it into +// peer.ConnConfig in a follow-up commit). +func (e *ConnMgr) Mode() connectionmode.Mode { + return e.mode +} + +// RelayTimeout returns the resolved relay-worker idle timeout in seconds. +func (e *ConnMgr) RelayTimeout() uint32 { + return e.relayTimeoutSecs +} + +// P2pRetryMax returns the resolved cap in seconds for the ICE-failure +// backoff schedule. 
Wire-format sentinel uint32-max means "user-explicit +// disable"; callers must translate that to 0. Phase 3 of #5989. +func (e *ConnMgr) P2pRetryMax() uint32 { + return e.p2pRetryMaxSecs +} + +// P2pTimeout returns the resolved ICE-only inactivity timeout in +// seconds. Phase 2 of #5989. 0 = ICE never times out (for non-dynamic +// modes). Phase 3.7i adds this accessor so the engine can include it +// in PeerSystemMeta. +func (e *ConnMgr) P2pTimeout() uint32 { + return e.p2pTimeoutSecs +} + +// ServerPushedMode returns the connection mode the management server +// most recently pushed via PeerConfig (independent of any local env +// or config override). Returns ModeUnspecified if no PeerConfig has +// been received yet. Used by the Android UI to display "Follow server +// (currently: <mode>)" in the override dropdown. +func (e *ConnMgr) ServerPushedMode() connectionmode.Mode { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedMode +} + +// ServerPushedRelayTimeoutSecs returns the relay-worker idle-timeout +// (seconds) most recently pushed by the management server, or 0 if no +// PeerConfig has been received. Used by the Android UI as a hint in +// the override field. +func (e *ConnMgr) ServerPushedRelayTimeoutSecs() uint32 { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedRelayTimeoutSecs +} + +// ServerPushedP2pTimeoutSecs returns the ICE-only inactivity timeout +// (seconds) most recently pushed by the management server. Only +// meaningful in p2p-dynamic mode. +func (e *ConnMgr) ServerPushedP2pTimeoutSecs() uint32 { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedP2pTimeoutSecs +} + +// ServerPushedP2pRetryMaxSecs returns the ICE-failure backoff cap +// (seconds) most recently pushed by the management server. 
When the +// server has not pushed a value (Phase 1 management servers do not +// know about this field yet) the built-in DefaultP2PRetryMax is +// returned so the Android UI hint shows what value the daemon is +// actually using as fallback. +func (e *ConnMgr) ServerPushedP2pRetryMaxSecs() uint32 { + e.spMu.RLock() + v := e.serverPushedP2pRetryMaxSecs + e.spMu.RUnlock() + if v > 0 { + return v + } + return uint32(peer.DefaultP2PRetryMax / time.Second) +} + func inactivityThresholdEnv() *time.Duration { envValue := os.Getenv(lazyconn.EnvInactivityThreshold) if envValue == "" { diff --git a/client/internal/conn_mgr_test.go b/client/internal/conn_mgr_test.go new file mode 100644 index 00000000000..21f0c93d523 --- /dev/null +++ b/client/internal/conn_mgr_test.go @@ -0,0 +1,221 @@ +package internal + +import ( + "testing" + + "github.com/netbirdio/netbird/client/internal/peerstore" + "github.com/netbirdio/netbird/shared/connectionmode" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestResolveConnectionMode(t *testing.T) { + cases := []struct { + name string + envMode connectionmode.Mode + envTimeout uint32 + cfgMode connectionmode.Mode + cfgRelayTimeout uint32 + cfgP2pTimeout uint32 + serverPC *mgmProto.PeerConfig + wantMode connectionmode.Mode + wantRelay uint32 + wantP2P uint32 + }{ + { + name: "all unspecified, server says legacy false -> P2P", + serverPC: &mgmProto.PeerConfig{LazyConnectionEnabled: false}, + wantMode: connectionmode.ModeP2P, + }, + { + name: "all unspecified, server says legacy true -> P2P_LAZY", + serverPC: &mgmProto.PeerConfig{LazyConnectionEnabled: true}, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "server pushes new enum -> wins over legacy bool", + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + LazyConnectionEnabled: false, + }, + wantMode: connectionmode.ModeRelayForced, + }, + { + name: "client config overrides server", + cfgMode: 
connectionmode.ModeP2PLazy, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + }, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "follow-server in client config clears local override", + cfgMode: connectionmode.ModeFollowServer, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + }, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "env var beats client config", + envMode: connectionmode.ModeRelayForced, + cfgMode: connectionmode.ModeP2PLazy, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + }, + wantMode: connectionmode.ModeRelayForced, + }, + { + name: "env timeout beats server timeout", + envTimeout: 42, + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 100}, + wantMode: connectionmode.ModeP2P, + wantRelay: 42, + }, + { + name: "client config timeout beats server", + cfgRelayTimeout: 50, + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 200}, + wantMode: connectionmode.ModeP2P, + wantRelay: 50, + }, + { + name: "no env, no client, only server timeout", + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 300}, + wantMode: connectionmode.ModeP2P, + wantRelay: 300, + }, + { + name: "nil serverPC defaults to P2P", + serverPC: nil, + wantMode: connectionmode.ModeP2P, + }, + { + name: "p2p-dynamic with server-pushed timeouts", + serverPC: &mgmProto.PeerConfig{ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, P2PTimeoutSeconds: 10800, RelayTimeoutSeconds: 86400}, + wantMode: connectionmode.ModeP2PDynamic, wantRelay: 86400, wantP2P: 10800, + }, + { + name: "client config p2p-timeout beats server", + cfgP2pTimeout: 555, + serverPC: &mgmProto.PeerConfig{ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, P2PTimeoutSeconds: 9999}, + wantMode: connectionmode.ModeP2PDynamic, wantP2P: 555, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + 
gotMode, gotRelay, gotP2P, _ := resolveConnectionMode(c.envMode, c.envTimeout, c.cfgMode, c.cfgRelayTimeout, c.cfgP2pTimeout, 0, c.serverPC) + if gotMode != c.wantMode { + t.Errorf("mode = %v, want %v", gotMode, c.wantMode) + } + if gotRelay != c.wantRelay { + t.Errorf("relay-timeout = %v, want %v", gotRelay, c.wantRelay) + } + if gotP2P != c.wantP2P { + t.Errorf("p2p-timeout = %v, want %v", gotP2P, c.wantP2P) + } + }) + } +} + +func TestResolveConnectionMode_P2pRetryMax_NotSet(t *testing.T) { + // serverPC has 0 (= "not set") -> result is 0, daemon will use default + mode, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, 0, + &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + P2PRetryMaxSeconds: 0, + }, + ) + if mode != connectionmode.ModeP2PDynamic { + t.Errorf("expected p2p-dynamic, got %v", mode) + } + if retryMax != 0 { + t.Errorf("server-pushed 0 should pass through as 0, got %d", retryMax) + } +} + +func TestResolveConnectionMode_P2pRetryMax_ServerSet(t *testing.T) { + mode, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, 0, + &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + P2PRetryMaxSeconds: 600, + }, + ) + if mode != connectionmode.ModeP2PDynamic { + t.Fatalf("expected p2p-dynamic, got %v", mode) + } + if retryMax != 600 { + t.Errorf("server-pushed 600 should win, got %d", retryMax) + } +} + +func TestResolveConnectionMode_P2pRetryMax_ClientCfgWins(t *testing.T) { + _, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, + 300, // cfgP2pRetryMax (client-side override) + &mgmProto.PeerConfig{ + P2PRetryMaxSeconds: 600, + }, + ) + if retryMax != 300 { + t.Errorf("client cfg should override server push, got %d", retryMax) + } +} + +// 
TestConnMgr_DetachICEForPeer_NotFound verifies that detaching ICE +// for a peer not in the store is a no-op (no error). The lookup miss +// can happen if a peer is removed concurrently with a GO_IDLE signal +// or an inactivity-manager fire. +func TestConnMgr_DetachICEForPeer_NotFound(t *testing.T) { + mgr := &ConnMgr{peerStore: peerstore.NewConnStore()} + + if err := mgr.DetachICEForPeer("unknown-peer-key"); err != nil { + t.Fatalf("DetachICEForPeer for unknown peer should be no-op, got %v", err) + } +} + +// TestConnMgr_deactivatePeerAction verifies the per-mode dispatch rule: +// p2p-dynamic detaches ICE, p2p-lazy delegates to the lazy manager, +// eager modes (p2p, relay-forced) are silent no-ops. This is the core +// fix for the lazy/eager mismatch (Phase 2 #5989). +func TestConnMgr_deactivatePeerAction(t *testing.T) { + cases := []struct { + mode connectionmode.Mode + want deactivateAction + }{ + {connectionmode.ModeP2P, deactivateNoop}, + {connectionmode.ModeRelayForced, deactivateNoop}, + {connectionmode.ModeUnspecified, deactivateNoop}, + {connectionmode.ModeP2PLazy, deactivateLazy}, + {connectionmode.ModeP2PDynamic, deactivateICE}, + } + for _, c := range cases { + t.Run(c.mode.String(), func(t *testing.T) { + mgr := &ConnMgr{mode: c.mode} + if got := mgr.deactivatePeerAction(); got != c.want { + t.Errorf("mode=%v action=%v want=%v", c.mode, got, c.want) + } + }) + } +} + +func TestConnMgr_ServerPushedFieldsAreRaceSafe(t *testing.T) { + cm := &ConnMgr{} + done := make(chan struct{}) + go func() { + for i := 0; i < 1000; i++ { + cm.spMu.Lock() + cm.serverPushedRelayTimeoutSecs = uint32(i) + cm.spMu.Unlock() + } + close(done) + }() + for i := 0; i < 1000; i++ { + _ = cm.ServerPushedRelayTimeoutSecs() + } + <-done +} diff --git a/client/internal/conn_state_pusher.go b/client/internal/conn_state_pusher.go new file mode 100644 index 00000000000..21a36484950 --- /dev/null +++ b/client/internal/conn_state_pusher.go @@ -0,0 +1,446 @@ +package internal + +import 
( + "context" + "crypto/rand" + "encoding/binary" + "sync" + "sync/atomic" + "time" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// PeerStateChangeEvent is the per-peer connection-state snapshot the +// pusher receives from the engine. Phase 3.7i of #5989. +type PeerStateChangeEvent struct { + Pubkey string + ConnType mgmProto.ConnType + LastHandshake time.Time + LatencyMS uint32 + Endpoint string + RelayServer string + RxBytes uint64 + TxBytes uint64 +} + +// PushSink is the upstream Sync mgmt-client interface the pusher writes +// to. The Engine's mgmClient.SyncPeerConnections satisfies it. +type PushSink interface { + Push(ctx context.Context, m *mgmProto.PeerConnectionMap) error +} + +// PeerStateSource produces the current full snapshot of per-peer state +// when the pusher needs to compute a delta or build a full snapshot. +// The Engine's statusRecorder snapshot satisfies it. +type PeerStateSource interface { + SnapshotAllRemotePeers() []PeerStateChangeEvent +} + +type pusherTuning struct { + baseInterval time.Duration + maxInterval time.Duration + doubleAfter int +} + +var defaultTuning = pusherTuning{ + baseInterval: 60 * time.Second, + maxInterval: 300 * time.Second, + doubleAfter: 3, +} + +type connStatePusher struct { + sink PushSink + source PeerStateSource + tuning pusherTuning + + // sessionID is generated once per process; mgmt uses it to detect a + // daemon restart even if a stale unary RPC from the previous process + // arrives AFTER the new process's full snapshot. Codex follow-up to + // PR review of Phase 3.7i. + sessionID uint64 + + // disabled is set true once the management server has rejected the + // SyncPeerConnections RPC with codes.Unimplemented. 
Old mgmt servers
+ // don't ship the new RPC at all; without this latch the pusher would
+ // keep retrying every heartbeat (60 s) and on every state change,
+ // burning wakeups and gRPC retries against a server that will never
+ // accept the call. Detected on the first push, then no further pushes
+ // are attempted for the lifetime of this pusher (i.e. until the next
+ // daemon restart, which gets a fresh detection cycle). Codex review
+ // of Phase 3.7i.
+ disabled atomic.Bool
+
+ mu sync.Mutex
+ lastPushed map[string]PeerStateChangeEvent
+ seq uint64
+
+ events chan PeerStateChangeEvent
+ snapshotReq chan uint64
+ initialReady chan struct{} // closed by TriggerInitialSnapshot
+ stop chan struct{}
+ wg sync.WaitGroup
+}
+
+func newConnStatePusher(sink PushSink, source PeerStateSource) *connStatePusher {
+ return newConnStatePusherForTest(sink, source, defaultTuning)
+}
+
+func newConnStatePusherForTest(sink PushSink, source PeerStateSource, t pusherTuning) *connStatePusher {
+ p := &connStatePusher{
+ sink: sink,
+ source: source,
+ tuning: t,
+ sessionID: newSessionID(),
+ lastPushed: make(map[string]PeerStateChangeEvent),
+ events: make(chan PeerStateChangeEvent, 64),
+ snapshotReq: make(chan uint64, 4),
+ initialReady: make(chan struct{}),
+ stop: make(chan struct{}),
+ }
+ p.wg.Add(1)
+ go p.loop()
+ return p
+}
+
+// newSessionID returns a random non-zero uint64. Zero is reserved as
+// the "legacy / unset" sentinel mgmt falls back to seq-only behaviour
+// for, so we re-roll on the (cryptographically negligible) chance of
+// drawing it.
+func newSessionID() uint64 {
+ var b [8]byte
+ for {
+ _, _ = rand.Read(b[:])
+ if id := binary.BigEndian.Uint64(b[:]); id != 0 {
+ return id
+ }
+ }
+}
+
+// Stop signals the loop goroutine to exit and blocks until it does.
+// NOT idempotent: the stop channel is closed unguarded, so a second
+// call to Stop panics — the caller must invoke it exactly once. 
+func (p *connStatePusher) Stop() { + close(p.stop) + p.wg.Wait() +} + +// OnPeerStateChange enqueues a state-change event. Non-blocking — drops +// if the buffer is full (the next bulk tick will catch up via delta). +// +// Safe on a nil receiver: Engine.Stop nils e.connStatePusher before +// removeAllPeers runs, but the status-recorder listener registered in +// Engine.Start is still wired and may fire a few more events during +// peer cleanup. A nil-receiver no-op makes the cleanup path cheap and +// avoids a panic on the engine shutdown race. +func (p *connStatePusher) OnPeerStateChange(ev PeerStateChangeEvent) { + if p == nil { + return + } + select { + case p.events <- ev: + default: + } +} + +// OnSnapshotRequest enqueues a snapshot-request nonce. Non-blocking, +// coalescing — multiple requests in flight result in a single full +// snapshot with the latest nonce echoed. Nil-receiver safe for the +// same shutdown-race reason as OnPeerStateChange. +func (p *connStatePusher) OnSnapshotRequest(nonce uint64) { + if p == nil { + return + } + select { + case p.snapshotReq <- nonce: + default: + } +} + +// TriggerInitialSnapshot signals the loop that the engine has populated +// the peer-state source for the first time and the loop may now send +// its initial full snapshot to management. Idempotent — subsequent +// calls are no-ops. +// +// Without this, newConnStatePusher's loop would race with the engine's +// peer-population path: starting in engine.Start (before addNewPeers +// has run for the first NetworkMap), it would emit an empty snapshot, +// and management would not see real peers until either a state change +// or the 60 s heartbeat tick. 
+func (p *connStatePusher) TriggerInitialSnapshot() { + p.mu.Lock() + defer p.mu.Unlock() + select { + case <-p.initialReady: + // already triggered + default: + close(p.initialReady) + } +} + +func (p *connStatePusher) loop() { + defer p.wg.Done() + // Wait until the engine signals that the first NetworkMap has been + // applied (peers populated). Sending an initial full snapshot before + // peers exist would publish an empty map to management, which would + // only get repaired on the next per-peer state change or after the + // 60 s heartbeat. Bail out cleanly if Stop is called first. + select { + case <-p.initialReady: + case <-p.stop: + return + } + if p.source != nil { + // Codex#4: drain any per-peer events that landed in p.events + // BEFORE initialReady fired. Those events reflect state that + // the upcoming flushFull (which calls SnapshotAllRemotePeers) + // will already cover; replaying them as later deltas would + // make the management server see them out of order (an old + // delta arriving AFTER a snapshot at higher seq). + // Only drain when we ARE going to send a snapshot — otherwise + // pre-init events are still valid state changes that need to + // flow through the normal delta path. 
+ drainLoop: + for { + select { + case <-p.events: + // discard + default: + break drainLoop + } + } + p.flushFull(p.source.SnapshotAllRemotePeers(), 0) + } + interval := p.tuning.baseInterval + emptyTicks := 0 + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + select { + case <-p.stop: + return + case ev := <-p.events: + batch := []PeerStateChangeEvent{ev} + drain := true + for drain { + select { + case e2 := <-p.events: + batch = append(batch, e2) + default: + drain = false + } + } + p.flushDelta(batch) + interval = p.tuning.baseInterval + emptyTicks = 0 + timer.Reset(interval) + case nonce := <-p.snapshotReq: + if p.source != nil { + p.flushFull(p.source.SnapshotAllRemotePeers(), nonce) + } + interval = p.tuning.baseInterval + emptyTicks = 0 + timer.Reset(interval) + case <-timer.C: + delta := p.computeDeltaFromSource() + if len(delta) > 0 { + p.flushDelta(delta) + interval = p.tuning.baseInterval + emptyTicks = 0 + } else { + emptyTicks++ + if emptyTicks >= p.tuning.doubleAfter && interval < p.tuning.maxInterval { + interval *= 2 + if interval > p.tuning.maxInterval { + interval = p.tuning.maxInterval + } + emptyTicks = 0 + } + } + timer.Reset(interval) + } + } +} + +func (p *connStatePusher) flushDelta(events []PeerStateChangeEvent) { + if len(events) == 0 { + return + } + if p.disabled.Load() { + // Mgmt server is pre-3.7i and rejected SyncPeerConnections with + // Unimplemented earlier in this session. Mark events as pushed so + // the dirty-state computation doesn't keep re-flagging them and + // retrying every tick. 
+ p.markPushed(events) + return + } + p.mu.Lock() + p.seq++ + seq := p.seq + p.mu.Unlock() + entries := make([]*mgmProto.PeerConnectionEntry, 0, len(events)) + for _, ev := range events { + entries = append(entries, eventToEntry(ev)) + } + if err := p.sink.Push(context.Background(), &mgmProto.PeerConnectionMap{ + Seq: seq, + FullSnapshot: false, + Entries: entries, + SessionId: p.sessionID, + }); err != nil { + if p.handleUnimplemented(err) { + // Old server: mark these as pushed so we don't keep retrying. + p.markPushed(events) + return + } + // Push failed (mgmt reconnect, transient gRPC error, etc.). + // Do NOT mark these events as lastPushed -- on the next tick + // the dirty-state computation will re-include them so the + // management server eventually catches up. Without this, a + // peer that flipped state during a brief mgmt outage would + // stay stale until its next state change or the 60 s heartbeat. + return + } + p.markPushed(events) +} + +// markPushed records the events as the latest known mgmt-side state. +// Pulled out so the disabled and success paths share the same locking. +func (p *connStatePusher) markPushed(events []PeerStateChangeEvent) { + p.mu.Lock() + for _, ev := range events { + p.lastPushed[ev.Pubkey] = ev + } + p.mu.Unlock() +} + +// handleUnimplemented inspects an error from the sink and, if it looks +// like the mgmt server doesn't implement SyncPeerConnections, latches +// the pusher into the disabled state and logs once. Returns true if the +// error was Unimplemented (caller should treat as "don't retry"); false +// otherwise (caller should keep dirty so the next tick retries). +func (p *connStatePusher) handleUnimplemented(err error) bool { + if err == nil { + return false + } + st, ok := status.FromError(err) + if !ok || st.Code() != codes.Unimplemented { + return false + } + // CompareAndSwap so the log line and the warn-once message only fire + // the first time we hit Unimplemented in this pusher's lifetime. 
+ if p.disabled.CompareAndSwap(false, true) { + log.Warnf("management server does not implement SyncPeerConnections (Phase 3.7i feature); peer-connection-state push disabled for this session — peer state UI on other clients may be less detailed but the daemon is unaffected") + } + return true +} + +func (p *connStatePusher) flushFull(events []PeerStateChangeEvent, inResponseToNonce uint64) { + if p.disabled.Load() { + // Mgmt is pre-3.7i; mark seen so we don't retry on every snapshot + // request. The mgmt-side store will not have any of our entries, + // but other clients' UIs will fall back to their pre-3.7i + // heuristics for our peer (legacy ConnStatus path on PeerState). + p.markPushed(events) + return + } + p.mu.Lock() + p.seq++ + seq := p.seq + p.mu.Unlock() + entries := make([]*mgmProto.PeerConnectionEntry, 0, len(events)) + for _, ev := range events { + entries = append(entries, eventToEntry(ev)) + } + if err := p.sink.Push(context.Background(), &mgmProto.PeerConnectionMap{ + Seq: seq, + FullSnapshot: true, + Entries: entries, + InResponseToNonce: inResponseToNonce, + SessionId: p.sessionID, + }); err != nil { + if p.handleUnimplemented(err) { + p.markPushed(events) + return + } + // Same dirty-retain semantics as flushDelta. A failed full + // snapshot leaves lastPushed unchanged so the next push (or + // the next snapshot request) will see every peer as dirty. + return + } + p.markPushed(events) +} + +func (p *connStatePusher) computeDeltaFromSource() []PeerStateChangeEvent { + if p.source == nil { + return nil + } + all := p.source.SnapshotAllRemotePeers() + p.mu.Lock() + defer p.mu.Unlock() + delta := make([]PeerStateChangeEvent, 0, len(all)) + for _, ev := range all { + prev, had := p.lastPushed[ev.Pubkey] + if !had || isMaterialChange(prev, ev) { + delta = append(delta, ev) + } + } + return delta +} + +// isMaterialChange decides whether ev's delta vs prev should generate a +// push. Always include conn_type/endpoint flips. 
Latency: include if +// |delta| >= 5 ms OR the handshake is newer (so any peer that's been +// actively talking AT ALL since the last push is reported, even if +// latency is stable). Phase 3.7i (rev 4 — was AND in rev 3, too +// conservative). +func isMaterialChange(prev, cur PeerStateChangeEvent) bool { + if prev.ConnType != cur.ConnType { + return true + } + if prev.Endpoint != cur.Endpoint { + return true + } + // Codex review: relay-server flips MUST surface immediately. The + // daemon does ship this field in eventToEntry; without including + // it here we'd only push it on a parallel material change, leaving + // dashboards stuck on the old relay-server URL whenever a peer + // migrates between relays. + if prev.RelayServer != cur.RelayServer { + return true + } + const latencyThresholdMS = 5 + d := int32(cur.LatencyMS) - int32(prev.LatencyMS) + if d < 0 { + d = -d + } + if d >= latencyThresholdMS { + return true + } + if cur.LastHandshake.After(prev.LastHandshake) { + return true + } + return false +} + +func eventToEntry(ev PeerStateChangeEvent) *mgmProto.PeerConnectionEntry { + e := &mgmProto.PeerConnectionEntry{ + RemotePubkey: ev.Pubkey, + ConnType: ev.ConnType, + LatencyMs: ev.LatencyMS, + Endpoint: ev.Endpoint, + RelayServer: ev.RelayServer, + RxBytes: ev.RxBytes, + TxBytes: ev.TxBytes, + } + if !ev.LastHandshake.IsZero() { + e.LastHandshake = timestamppb.New(ev.LastHandshake) + } + return e +} diff --git a/client/internal/conn_state_pusher_material_test.go b/client/internal/conn_state_pusher_material_test.go new file mode 100644 index 00000000000..d56c7b11277 --- /dev/null +++ b/client/internal/conn_state_pusher_material_test.go @@ -0,0 +1,89 @@ +package internal + +import ( + "testing" + "time" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Codex review: isMaterialChange did NOT include RelayServer in its +// per-field comparison, so a peer migrating between relay servers +// would not generate an immediate-push event. 
Lock that in. +func TestIsMaterialChange_RelayServerFlip(t *testing.T) { + ts := time.Date(2026, 5, 3, 20, 0, 0, 0, time.UTC) + prev := PeerStateChangeEvent{ + Pubkey: "peer1", + ConnType: mgmProto.ConnType_CONN_TYPE_RELAYED, + Endpoint: "100.87.61.232:51820", + LastHandshake: ts, + LatencyMS: 20, + RelayServer: "rels://relay1.example:443/relay", + } + cur := prev + cur.RelayServer = "rels://relay2.example:443/relay" + if !isMaterialChange(prev, cur) { + t.Error("RelayServer change must register as material — UI/dashboard relies on this for relay-server flip surfacing") + } +} + +func TestIsMaterialChange_NoChange(t *testing.T) { + ts := time.Date(2026, 5, 3, 20, 0, 0, 0, time.UTC) + ev := PeerStateChangeEvent{ + Pubkey: "peer1", + ConnType: mgmProto.ConnType_CONN_TYPE_P2P, + Endpoint: "100.87.61.232:51820", + LastHandshake: ts, + LatencyMS: 10, + RelayServer: "rels://r.example:443/relay", + } + if isMaterialChange(ev, ev) { + t.Error("identical events must not be material") + } +} + +func TestIsMaterialChange_LatencyBelowThreshold(t *testing.T) { + prev := PeerStateChangeEvent{Pubkey: "p1", LatencyMS: 10} + cur := prev + cur.LatencyMS = 12 // delta = 2, below 5 ms threshold + if isMaterialChange(prev, cur) { + t.Error("2 ms latency change must not be material (threshold = 5 ms)") + } +} + +func TestIsMaterialChange_LatencyAtThreshold(t *testing.T) { + prev := PeerStateChangeEvent{Pubkey: "p1", LatencyMS: 10} + cur := prev + cur.LatencyMS = 15 // delta = 5, at threshold + if !isMaterialChange(prev, cur) { + t.Error("5 ms latency change must be material (threshold = 5 ms inclusive)") + } +} + +func TestIsMaterialChange_HandshakeProgress(t *testing.T) { + t0 := time.Date(2026, 5, 3, 20, 0, 0, 0, time.UTC) + prev := PeerStateChangeEvent{Pubkey: "p1", LastHandshake: t0} + cur := prev + cur.LastHandshake = t0.Add(time.Second) + if !isMaterialChange(prev, cur) { + t.Error("newer handshake must be material (peer is actively talking)") + } +} + +func 
TestIsMaterialChange_ConnTypeFlip(t *testing.T) { + prev := PeerStateChangeEvent{Pubkey: "p1", ConnType: mgmProto.ConnType_CONN_TYPE_RELAYED} + cur := prev + cur.ConnType = mgmProto.ConnType_CONN_TYPE_P2P + if !isMaterialChange(prev, cur) { + t.Error("conn-type change must always be material") + } +} + +func TestIsMaterialChange_EndpointFlip(t *testing.T) { + prev := PeerStateChangeEvent{Pubkey: "p1", Endpoint: "1.2.3.4:51820"} + cur := prev + cur.Endpoint = "5.6.7.8:51820" + if !isMaterialChange(prev, cur) { + t.Error("endpoint change must always be material (NAT roam, P2P/Relay flip)") + } +} diff --git a/client/internal/conn_state_pusher_test.go b/client/internal/conn_state_pusher_test.go new file mode 100644 index 00000000000..1d2deb0bef4 --- /dev/null +++ b/client/internal/conn_state_pusher_test.go @@ -0,0 +1,225 @@ +package internal + +import ( + "context" + "sync" + "testing" + "time" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +type stubPushSink struct { + mu sync.Mutex + pushes []*mgmProto.PeerConnectionMap + notif chan struct{} +} + +func newStubSink() *stubPushSink { return &stubPushSink{notif: make(chan struct{}, 16)} } + +func (s *stubPushSink) Push(_ context.Context, m *mgmProto.PeerConnectionMap) error { + s.mu.Lock() + s.pushes = append(s.pushes, m) + s.mu.Unlock() + select { + case s.notif <- struct{}{}: + default: + } + return nil +} + +func (s *stubPushSink) waitForPush(t *testing.T, timeout time.Duration) { + t.Helper() + select { + case <-s.notif: + case <-time.After(timeout): + t.Fatal("timed out waiting for push") + } +} + +func (s *stubPushSink) snapshot() []*mgmProto.PeerConnectionMap { + s.mu.Lock() + defer s.mu.Unlock() + out := make([]*mgmProto.PeerConnectionMap, len(s.pushes)) + copy(out, s.pushes) + return out +} + +type stubPeerStateSource struct { + mu sync.Mutex + snapshot []PeerStateChangeEvent +} + +func (s *stubPeerStateSource) set(es []PeerStateChangeEvent) { + s.mu.Lock() + defer s.mu.Unlock() + 
s.snapshot = es +} + +func (s *stubPeerStateSource) SnapshotAllRemotePeers() []PeerStateChangeEvent { + s.mu.Lock() + defer s.mu.Unlock() + out := make([]PeerStateChangeEvent, len(s.snapshot)) + copy(out, s.snapshot) + return out +} + +func TestConnStatePusher_StateChangeIsPushedImmediately(t *testing.T) { + sink := newStubSink() + p := newConnStatePusher(sink, nil) + defer p.Stop() + // Engine normally does this after the first NetworkMap is applied; + // in unit tests we trigger immediately so the loop unblocks. + p.TriggerInitialSnapshot() + + p.OnPeerStateChange(PeerStateChangeEvent{ + Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_P2P, + }) + sink.waitForPush(t, 500*time.Millisecond) + + got := sink.snapshot() + if len(got) != 1 { + t.Fatalf("want 1 push, got %d", len(got)) + } + if got[0].GetFullSnapshot() { + t.Error("state-change push must not be full snapshot") + } +} + +func TestConnStatePusher_NoExtraPushesWhenSnapshotUnchanged(t *testing.T) { + sink := newStubSink() + src := &stubPeerStateSource{} + src.set([]PeerStateChangeEvent{{Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_P2P, LatencyMS: 10}}) + p := newConnStatePusherForTest(sink, src, + pusherTuning{baseInterval: 30 * time.Millisecond, maxInterval: 200 * time.Millisecond, doubleAfter: 2}) + defer p.Stop() + p.TriggerInitialSnapshot() + + sink.waitForPush(t, 500*time.Millisecond) + deadline := time.After(200 * time.Millisecond) + for { + select { + case <-deadline: + if got := sink.snapshot(); len(got) != 1 { + t.Fatalf("want exactly 1 push (initial snapshot), got %d", len(got)) + } + return + case <-sink.notif: + t.Fatal("unexpected push (delta should have been empty)") + } + } +} + +func TestConnStatePusher_OnSnapshotRequestSendsFullWithNonceEcho(t *testing.T) { + sink := newStubSink() + src := &stubPeerStateSource{} + src.set([]PeerStateChangeEvent{ + {Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_P2P}, + {Pubkey: "peerB", ConnType: mgmProto.ConnType_CONN_TYPE_RELAYED}, 
+ }) + p := newConnStatePusherForTest(sink, src, + pusherTuning{baseInterval: time.Hour, maxInterval: time.Hour, doubleAfter: 999}) + defer p.Stop() + p.TriggerInitialSnapshot() + sink.waitForPush(t, 500*time.Millisecond) // initial snapshot + sink.mu.Lock() + sink.pushes = nil + sink.mu.Unlock() + + p.OnSnapshotRequest(42) + sink.waitForPush(t, 500*time.Millisecond) + + got := sink.snapshot() + if len(got) != 1 { + t.Fatalf("want 1 push, got %d", len(got)) + } + if !got[0].GetFullSnapshot() { + t.Error("snapshot-request push must be full") + } + if got[0].GetInResponseToNonce() != 42 { + t.Errorf("want nonce echo 42, got %d", got[0].GetInResponseToNonce()) + } + if len(got[0].GetEntries()) != 2 { + t.Errorf("want 2 entries, got %d", len(got[0].GetEntries())) + } +} + +// Phase 3.7i: stubUnimplementedSink fails every Push with the gRPC +// codes.Unimplemented status. Used to verify the pusher detects and +// latches into the disabled state when talking to a pre-3.7i mgmt +// server. Codex review of Phase 3.7i. +type stubUnimplementedSink struct { + mu sync.Mutex + callCount int + notif chan struct{} +} + +func newStubUnimplementedSink() *stubUnimplementedSink { + return &stubUnimplementedSink{notif: make(chan struct{}, 16)} +} + +func (s *stubUnimplementedSink) Push(_ context.Context, _ *mgmProto.PeerConnectionMap) error { + s.mu.Lock() + s.callCount++ + s.mu.Unlock() + select { + case s.notif <- struct{}{}: + default: + } + return grpcStatusUnimplemented() +} + +func (s *stubUnimplementedSink) calls() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.callCount +} + +// grpcStatusUnimplemented is a thin wrapper so tests don't have to import +// grpc/codes themselves. +func grpcStatusUnimplemented() error { + return grpcCodes_unimplementedError() +} + +// Phase 3.7i / Codex review: when the mgmt server returns Unimplemented +// from SyncPeerConnections, the pusher should latch into "disabled" and +// stop trying to push. 
Subsequent state changes still flow through the +// normal lazy-state path but no further RPC calls are issued. +func TestConnStatePusher_UnimplementedFromMgmt_LatchesDisabled(t *testing.T) { + sink := newStubUnimplementedSink() + source := &stubPeerStateSource{} + source.set([]PeerStateChangeEvent{ + {Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_P2P}, + }) + p := newConnStatePusherForTest(sink, source, + pusherTuning{baseInterval: 50 * time.Millisecond, maxInterval: 200 * time.Millisecond, doubleAfter: 2}) + defer p.Stop() + p.TriggerInitialSnapshot() + + // Wait for the very first push (initial snapshot), which gets the + // Unimplemented error back and latches `disabled`. + select { + case <-sink.notif: + case <-time.After(500 * time.Millisecond): + t.Fatal("timed out waiting for first push attempt") + } + + // Drive several state changes and ticks. None of them must produce + // further pushes — disabled is sticky for the lifetime of this pusher. + for i := 0; i < 8; i++ { + p.OnPeerStateChange(PeerStateChangeEvent{ + Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_RELAYED, + }) + time.Sleep(30 * time.Millisecond) + } + // Generous quiescence window: 4× baseInterval so any retry would + // have surfaced. 
+ time.Sleep(250 * time.Millisecond) + + if got := sink.calls(); got != 1 { + t.Errorf("after Unimplemented, expected exactly 1 push attempt for the lifetime of the pusher, got %d", got) + } + if !p.disabled.Load() { + t.Error("pusher should have latched disabled=true after Unimplemented") + } +} diff --git a/client/internal/conn_state_pusher_testhelper_test.go b/client/internal/conn_state_pusher_testhelper_test.go new file mode 100644 index 00000000000..826578cec76 --- /dev/null +++ b/client/internal/conn_state_pusher_testhelper_test.go @@ -0,0 +1,13 @@ +package internal + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcCodes_unimplementedError returns a synthetic gRPC status error +// with codes.Unimplemented. Test-only helper kept in the package so +// the test file doesn't have to import grpc/codes directly. +func grpcCodes_unimplementedError() error { + return status.Error(codes.Unimplemented, "method SyncPeerConnections not implemented") +} diff --git a/client/internal/connect.go b/client/internal/connect.go index 72e096a80a1..87768208ac1 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -25,6 +25,7 @@ import ( "github.com/netbirdio/netbird/client/internal/listener" "github.com/netbirdio/netbird/client/internal/metrics" "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/statemanager" "github.com/netbirdio/netbird/client/internal/stdnet" @@ -566,6 +567,11 @@ func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConf LazyConnectionEnabled: config.LazyConnectionEnabled, + ConnectionMode: parseConnectionMode(config.ConnectionMode), + RelayTimeoutSeconds: config.RelayTimeoutSeconds, + P2pTimeoutSeconds: config.P2pTimeoutSeconds, + P2pRetryMaxSeconds: config.P2pRetryMaxSeconds, + MTU: selectMTU(config.MTU, 
peerConfig.Mtu), LogPath: logPath, @@ -695,3 +701,16 @@ func closeConnWithLog(conn *net.UDPConn) { log.Warnf("closing the testing port %d took %s. Usually it is safe to ignore, but continuous warnings may indicate a problem.", conn.LocalAddr().(*net.UDPAddr).Port, time.Since(startClosing)) } } + +// parseConnectionMode is a tolerant wrapper used by the EngineConfig builder. +// An invalid string in the persisted profile (e.g. left over from a +// downgrade-then-upgrade cycle) is logged and treated as Unspecified so the +// daemon falls through to env / server resolution rather than panicking. +func parseConnectionMode(s string) connectionmode.Mode { + m, err := connectionmode.ParseString(s) + if err != nil { + log.Warnf("ignoring invalid connection_mode %q in profile config: %v", s, err) + return connectionmode.ModeUnspecified + } + return m +} diff --git a/client/internal/debouncer/debouncer.go b/client/internal/debouncer/debouncer.go new file mode 100644 index 00000000000..004a03d4aac --- /dev/null +++ b/client/internal/debouncer/debouncer.go @@ -0,0 +1,53 @@ +// Package debouncer provides a small "trigger now or coalesce within a +// window" helper. Used by the engine to debounce SyncMeta calls. +package debouncer + +import ( + "sync" + "time" +) + +// Debouncer coalesces rapid successive Trigger calls: only the last fn +// registered within the delay window is executed, after the window +// expires. +type Debouncer struct { + delay time.Duration + mu sync.Mutex + timer *time.Timer + fn func() +} + +// New creates a Debouncer with the given delay window. +func New(delay time.Duration) *Debouncer { + return &Debouncer{delay: delay} +} + +// Trigger schedules fn to run after the configured delay. Subsequent +// Trigger calls within the window REPLACE the pending fn (last-write-wins) +// and reset the timer. 
+func (d *Debouncer) Trigger(fn func()) { + d.mu.Lock() + defer d.mu.Unlock() + d.fn = fn + if d.timer != nil { + d.timer.Stop() + } + d.timer = time.AfterFunc(d.delay, func() { + d.mu.Lock() + f := d.fn + d.mu.Unlock() + if f != nil { + f() + } + }) +} + +// Stop cancels any pending fn. Safe to call multiple times. +func (d *Debouncer) Stop() { + d.mu.Lock() + defer d.mu.Unlock() + if d.timer != nil { + d.timer.Stop() + d.timer = nil + } +} diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 0a12a5326e3..c54a0da9a88 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -240,6 +240,14 @@ type BundleGenerator struct { refreshStatus func() // Optional callback to refresh status before bundle generation clientMetrics MetricsExporter + // Phase 3.7h (#5989): server-pushed connection-mode and timer values, captured + // at bundle-generation time so config.txt records both the effective and the + // configured values. Zero/empty when no PeerConfig has been received yet. + serverPushedConnectionMode string + serverPushedRelayTimeoutSec uint32 + serverPushedP2pTimeoutSec uint32 + serverPushedP2pRetryMaxSec uint32 + anonymize bool includeSystemInfo bool logFileCount uint32 @@ -263,6 +271,14 @@ type GeneratorDependencies struct { CapturePath string RefreshStatus func() ClientMetrics MetricsExporter + + // Phase 3.7h (#5989): server-pushed connection-mode + timer values from + // ConnMgr.ServerPushed*(). Recorded in config.txt next to the effective + // values so debug bundles show both. Zero/empty if not provided. 
+ ServerPushedConnectionMode string + ServerPushedRelayTimeoutSec uint32 + ServerPushedP2pTimeoutSec uint32 + ServerPushedP2pRetryMaxSec uint32 } func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGenerator { @@ -285,6 +301,11 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen refreshStatus: deps.RefreshStatus, clientMetrics: deps.ClientMetrics, + serverPushedConnectionMode: deps.ServerPushedConnectionMode, + serverPushedRelayTimeoutSec: deps.ServerPushedRelayTimeoutSec, + serverPushedP2pTimeoutSec: deps.ServerPushedP2pTimeoutSec, + serverPushedP2pRetryMaxSec: deps.ServerPushedP2pRetryMaxSec, + anonymize: cfg.Anonymize, includeSystemInfo: cfg.IncludeSystemInfo, logFileCount: logFileCount, @@ -644,6 +665,22 @@ func (g *BundleGenerator) addCommonConfigFields(configContent *strings.Builder) configContent.WriteString(fmt.Sprintf("LazyConnectionEnabled: %v\n", g.internalConfig.LazyConnectionEnabled)) configContent.WriteString(fmt.Sprintf("MTU: %d\n", g.internalConfig.MTU)) + + // Phase 1+2+3 (#5989) connection-mode resolution + lifecycle timers. + // Effective values (after env > local-config > server-push resolution): + configContent.WriteString(fmt.Sprintf("ConnectionMode: %s\n", g.internalConfig.ConnectionMode)) + configContent.WriteString(fmt.Sprintf("RelayTimeoutSeconds: %d\n", g.internalConfig.RelayTimeoutSeconds)) + configContent.WriteString(fmt.Sprintf("P2pTimeoutSeconds: %d\n", g.internalConfig.P2pTimeoutSeconds)) + configContent.WriteString(fmt.Sprintf("P2pRetryMaxSeconds: %d\n", g.internalConfig.P2pRetryMaxSeconds)) + + // Server-pushed values from the most recent PeerConfig, captured at bundle + // generation time. Empty/zero indicates no PeerConfig received yet (e.g. + // daemon not yet connected to management). Allows operators to see what + // would apply if all local overrides were cleared. 
+ configContent.WriteString(fmt.Sprintf("ServerPushedConnectionMode: %s\n", g.serverPushedConnectionMode)) + configContent.WriteString(fmt.Sprintf("ServerPushedRelayTimeoutSeconds: %d\n", g.serverPushedRelayTimeoutSec)) + configContent.WriteString(fmt.Sprintf("ServerPushedP2pTimeoutSeconds: %d\n", g.serverPushedP2pTimeoutSec)) + configContent.WriteString(fmt.Sprintf("ServerPushedP2pRetryMaxSeconds: %d\n", g.serverPushedP2pRetryMaxSec)) } func (g *BundleGenerator) addProf() (err error) { diff --git a/client/internal/engine.go b/client/internal/engine.go index 7f19e2d2876..545705c850e 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -34,6 +34,7 @@ import ( nbnetstack "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/acl" + "github.com/netbirdio/netbird/client/internal/debouncer" "github.com/netbirdio/netbird/client/internal/debug" "github.com/netbirdio/netbird/client/internal/dns" dnsconfig "github.com/netbirdio/netbird/client/internal/dns/config" @@ -61,6 +62,7 @@ import ( "github.com/netbirdio/netbird/client/system" nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/connectionmode" mgm "github.com/netbirdio/netbird/shared/management/client" "github.com/netbirdio/netbird/shared/management/domain" mgmProto "github.com/netbirdio/netbird/shared/management/proto" @@ -137,6 +139,26 @@ type EngineConfig struct { LazyConnectionEnabled bool + // ConnectionMode is the resolved peer-connection mode for this daemon + // session. ModeUnspecified means "fall back to LazyConnectionEnabled". + // Set by the caller of NewEngine; usually populated from + // profilemanager.Config.ConnectionMode in connect.go. + ConnectionMode connectionmode.Mode + + // RelayTimeoutSeconds, when > 0, overrides the server-pushed relay + // timeout. 0 means "follow server-pushed value". 
+ RelayTimeoutSeconds uint32 + + // P2pTimeoutSeconds, when > 0, overrides the server-pushed p2p timeout. + // 0 means "follow server-pushed value". Reserved for Phase 2 -- has no + // effect in Phase 1. + P2pTimeoutSeconds uint32 + + // P2pRetryMaxSeconds, when > 0, overrides the server-pushed + // p2p_retry_max_seconds. 0 = use server-pushed value (or built-in + // default 15 min). Phase 3 of #5989. + P2pRetryMaxSeconds uint32 + MTU uint16 // for debug bundle generation @@ -198,6 +220,14 @@ type Engine struct { // networkSerial is the latest CurrentSerial (state ID) of the network sent by the Management service networkSerial uint64 + // Phase 3.7i (Codex review): debounce remote-offline-transitions + // to absorb brief mgmt-reconnect blips. peerOfflineDebounce holds + // pending close-after-grace-period timers keyed by peer pubkey. + // Cancelled when the same peer flips back online before the timer + // fires. Protected by peerOfflineDebounceMu. + peerOfflineDebounce map[string]*time.Timer + peerOfflineDebounceMu sync.Mutex + networkMonitor *networkmonitor.NetworkMonitor sshServer sshServer @@ -246,6 +276,13 @@ type Engine struct { jobExecutorWG sync.WaitGroup exposeManager *expose.Manager + + // Phase 3.7i (#5989): track last-pushed effective config to detect changes. + lastPushedEff mgm.EffectiveConnConfig + syncMetaDebouncer *debouncer.Debouncer + + // Phase 3.7i (#5989): per-peer connection-state pusher. 
+ connStatePusher *connStatePusher } // Peer is an instance of the Connection Peer @@ -280,6 +317,7 @@ func NewEngine( STUNs: []*stun.URI{}, TURNs: []*stun.URI{}, networkSerial: 0, + peerOfflineDebounce: make(map[string]*time.Timer), statusRecorder: services.StatusRecorder, stateManager: services.StateManager, portForwardManager: portforward.NewManager(), @@ -288,12 +326,24 @@ func NewEngine( jobExecutor: jobexec.NewExecutor(), clientMetrics: services.ClientMetrics, updateManager: services.UpdateManager, + syncMetaDebouncer: debouncer.New(5 * time.Second), } log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String()) return engine } +// ConnMgr returns the engine's ConnMgr or nil if the engine has not been +// started yet (or has already shut down). Used by the Android UI to query +// the server-pushed connection mode for the dropdown's "Follow server" +// label. +func (e *Engine) ConnMgr() *ConnMgr { + if e == nil { + return nil + } + return e.connMgr +} + func (e *Engine) Stop() error { if e == nil { // this seems to be a very odd case but there was the possibility if the netbird down command comes before the engine is fully started @@ -302,10 +352,24 @@ func (e *Engine) Stop() error { } e.syncMsgMux.Lock() + if e.syncMetaDebouncer != nil { + e.syncMetaDebouncer.Stop() + } + if e.connMgr != nil { e.connMgr.Close() } + if e.connStatePusher != nil { + e.connStatePusher.Stop() + e.connStatePusher = nil + } + + // Phase 3.7i: cancel all pending offline-close debounce timers so + // none fires after Stop() has begun. Safe to call even if no timers + // were armed. 
+ e.cancelAllRemoteOfflineCloses() + // stopping network monitor first to avoid starting the engine again if e.networkMonitor != nil { e.networkMonitor.Stop() @@ -574,8 +638,53 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.connMgr = NewConnMgr(e.config, e.statusRecorder, e.peerStore, wgIface) e.connMgr.Start(e.ctx) + // Phase 3.7i (#5989), Codex review 2026-05-05: wire the + // ActivityRecorder OnActivity callback to the relay-state ICE- + // upgrade fast-path. Fires at most once per saveFrequency=5s per + // peer when a >32-byte type-4 WG transport packet is observed in + // the receive path. Conn.AttachICEOnRelayActivity gates on: + // mode==p2p-dynamic, conn open, currentConnPriority==Relay, + // no ICE listener, no active backoff, everConnected==true. + // Closes the gap that left peers stuck on relay forever after + // iceTimeout fired (D95820 ↔ w11-test1 hop reproduced 2026-05-05). + if bind := wgIface.GetBind(); bind != nil { + if rec := bind.ActivityRecorder(); rec != nil { + rec.SetOnActivity(func(pubKey string) { + if conn, ok := e.peerStore.PeerConn(pubKey); ok { + conn.AttachICEOnRelayActivity() + } + }) + } + } + + // Phase 3.7i (#5989): start the per-peer connection-state pusher. + e.connStatePusher = newConnStatePusher( + &enginePushSink{engine: e}, + &enginePeerStateSource{engine: e}, + ) + // nil-guard the closures: Engine.Stop() sets e.connStatePusher = nil + // BEFORE removeAllPeers() runs, and removeAllPeers triggers + // notifyConnStateChange callbacks for every peer being torn down. + // Without the guard, every disconnect crashed the daemon with a + // nil-pointer deref. The same applies to the snapshot handler in + // case mgmt sends a request during shutdown. 
+ e.statusRecorder.SetConnStateListener(func(pubkey string, st peer.State) { + pusher := e.connStatePusher + if pusher == nil { + return + } + pusher.OnPeerStateChange(peerStateToEvent(pubkey, st)) + }) + e.mgmClient.SetSnapshotRequestHandler(func(nonce uint64) { + pusher := e.connStatePusher + if pusher == nil { + return + } + pusher.OnSnapshotRequest(nonce) + }) + e.srWatcher = guard.NewSRWatcher(e.signal, e.relayManager, e.mobileDep.IFaceDiscover, iceCfg) - e.srWatcher.Start(peer.IsForceRelayed()) + e.srWatcher.Start(peer.IsForceRelayed()) //nolint:staticcheck // intentionally retained for Phase-1 backwards compat e.receiveSignalEvents() e.receiveManagementEvents() @@ -799,6 +908,13 @@ func (e *Engine) removeAllPeers() error { func (e *Engine) removePeer(peerKey string) error { log.Debugf("removing peer from engine %s", peerKey) + // Phase 3.7i hardening: cancel any pending offline-debounce timer + // for this peer BEFORE the conn is closed. The timer's callback + // already nil-guards against a missing peerStore entry, but + // cancelling explicitly avoids a wasted goroutine wakeup + + // log-noise once the peer is gone for good. + e.cancelRemoteOfflineClose(peerKey) + e.connMgr.RemovePeerConn(peerKey) err := e.statusRecorder.RemovePeer(peerKey) @@ -1115,6 +1231,15 @@ func (e *Engine) handleBundle(params *mgmProto.BundleParameters) (*mgmProto.JobR }, } + // Phase 3.7h (#5989): record server-pushed mode + timers in config.txt so + // debug bundles capture both effective and configured values. 
+ if e.connMgr != nil { + bundleDeps.ServerPushedConnectionMode = e.connMgr.ServerPushedMode().String() + bundleDeps.ServerPushedRelayTimeoutSec = e.connMgr.ServerPushedRelayTimeoutSecs() + bundleDeps.ServerPushedP2pTimeoutSec = e.connMgr.ServerPushedP2pTimeoutSecs() + bundleDeps.ServerPushedP2pRetryMaxSec = e.connMgr.ServerPushedP2pRetryMaxSecs() + } + bundleJobParams := debug.BundleConfig{ Anonymize: params.Anonymize, IncludeSystemInfo: true, @@ -1231,8 +1356,59 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { return nil } - if err := e.connMgr.UpdatedRemoteFeatureFlag(e.ctx, networkMap.GetPeerConfig().GetLazyConnectionEnabled()); err != nil { - log.Errorf("failed to update lazy connection feature flag: %v", err) + if err := e.connMgr.UpdatedRemotePeerConfig(e.ctx, networkMap.GetPeerConfig()); err != nil { + log.Errorf("failed to update connection mode from PeerConfig: %v", err) + } + + // Phase 3.7i hardening: if the management push moved us out of + // p2p-dynamic mode, cancel any pending offline-debounce timers + // they would have closed connections that are no longer + // dynamically managed. The timer-callback re-validation also + // covers this race, but explicit cancellation drops dead timer + // goroutines faster and keeps the timer-map empty for inspection. + if e.connMgr != nil && e.connMgr.Mode() != connectionmode.ModeP2PDynamic { + e.cancelAllRemoteOfflineCloses() + } + + // Phase 3.7i (#5989): record + push effective values. + newEff := mgm.EffectiveConnConfig{ + Mode: e.connMgr.Mode().String(), + RelayTimeoutSecs: e.connMgr.RelayTimeout(), + P2PTimeoutSecs: e.connMgr.P2pTimeout(), + P2PRetryMaxSecs: e.connMgr.P2pRetryMax(), + } + e.mgmClient.SetEffectiveConnConfig(newEff) + if e.lastPushedEff != newEff { + e.lastPushedEff = newEff + // Debounce SyncMeta so a burst of NetworkMap updates doesn't + // generate a burst of SyncMeta calls. 
+ e.syncMetaDebouncer.Trigger(func() { + info, err := system.GetInfoWithChecks(e.ctx, e.checks) + if err != nil { + log.Warnf("failed to get system info for SyncMeta: %v", err) + info = system.GetInfo(e.ctx) + } + info.SetFlags( + e.config.RosenpassEnabled, + e.config.RosenpassPermissive, + &e.config.ServerSSHAllowed, + e.config.DisableClientRoutes, + e.config.DisableServerRoutes, + e.config.DisableDNS, + e.config.DisableFirewall, + e.config.BlockLANAccess, + e.config.BlockInbound, + e.config.LazyConnectionEnabled, + e.config.EnableSSHRoot, + e.config.EnableSSHSFTP, + e.config.EnableSSHLocalPortForwarding, + e.config.EnableSSHRemotePortForwarding, + e.config.DisableSSHAuth, + ) + if err := e.mgmClient.SyncMeta(info); err != nil { + log.Warnf("SyncMeta after effective-mode change: %v", err) + } + }) } if e.firewall != nil { @@ -1296,6 +1472,27 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { e.updateOfflinePeers(networkMap.GetOfflinePeers()) + // Phase 3.7i (#5989): populate RemoteMeta for offline peers so the + // daemon-RPC StatusResponse can show them with their groups + last_seen. 
+ for _, op := range networkMap.GetOfflinePeers() { + if err := e.statusRecorder.UpdatePeerRemoteMeta(op.GetWgPubKey(), peer.RemoteMeta{ + EffectiveConnectionMode: op.GetEffectiveConnectionMode(), + EffectiveRelayTimeoutSecs: op.GetEffectiveRelayTimeoutSecs(), + EffectiveP2PTimeoutSecs: op.GetEffectiveP2PTimeoutSecs(), + EffectiveP2PRetryMaxSecs: op.GetEffectiveP2PRetryMaxSecs(), + ConfiguredConnectionMode: op.GetConfiguredConnectionMode(), + ConfiguredRelayTimeoutSecs: op.GetConfiguredRelayTimeoutSecs(), + ConfiguredP2PTimeoutSecs: op.GetConfiguredP2PTimeoutSecs(), + ConfiguredP2PRetryMaxSecs: op.GetConfiguredP2PRetryMaxSecs(), + Groups: op.GetGroups(), + LastSeenAtServer: peer.TimestampOrZero(op.GetLastSeenAtServer()), + LiveOnline: op.GetLiveOnline(), + ServerLivenessKnown: op.GetServerLivenessKnown(), + }); err != nil { + log.Debugf("UpdatePeerRemoteMeta(offline %s): %v", op.GetWgPubKey(), err) + } + } + // Filter out own peer from the remote peers list localPubKey := e.config.WgPrivateKey.PublicKey().String() remotePeers := make([]*mgmProto.RemotePeerConfig, 0, len(networkMap.GetRemotePeers())) @@ -1309,6 +1506,9 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { if networkMap.GetRemotePeersIsEmpty() { err := e.removeAllPeers() e.statusRecorder.FinishPeerListModifications() + if e.connStatePusher != nil { + e.connStatePusher.TriggerInitialSnapshot() + } if err != nil { return err } @@ -1329,6 +1529,12 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { } e.statusRecorder.FinishPeerListModifications() + // Phase 3.7i: peers are populated for the first time; release + // the conn-state pusher so its initial full snapshot reflects + // the actual peer set instead of an empty map. 
+ if e.connStatePusher != nil { + e.connStatePusher.TriggerInitialSnapshot() + } e.updatePeerSSHHostKeys(remotePeers) @@ -1337,6 +1543,62 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { } e.updateSSHServerAuth(networkMap.GetSshAuth()) + + // Phase 3.7i (#5989): mirror RemotePeerConfig fields into peer.Status + // so daemon-RPC StatusResponse exposes them for UIs. Also detect + // LiveOnline true->false transitions so we can proactively close + // the local conn under p2p-dynamic instead of letting its guard + // spam reconnect-offers for ~relay_timeout minutes after the peer + // disappeared. + for _, rp := range remotePeers { + pubKey := rp.GetWgPubKey() + liveOnline := rp.GetLiveOnline() + livenessKnown := rp.GetServerLivenessKnown() + var prevLiveOnline bool + var prevLivenessKnown bool + if prev, err := e.statusRecorder.GetPeer(pubKey); err == nil { + prevLiveOnline = prev.RemoteLiveOnline + prevLivenessKnown = prev.RemoteServerLivenessKnown + } + if err := e.statusRecorder.UpdatePeerRemoteMeta(pubKey, peer.RemoteMeta{ + EffectiveConnectionMode: rp.GetEffectiveConnectionMode(), + EffectiveRelayTimeoutSecs: rp.GetEffectiveRelayTimeoutSecs(), + EffectiveP2PTimeoutSecs: rp.GetEffectiveP2PTimeoutSecs(), + EffectiveP2PRetryMaxSecs: rp.GetEffectiveP2PRetryMaxSecs(), + ConfiguredConnectionMode: rp.GetConfiguredConnectionMode(), + ConfiguredRelayTimeoutSecs: rp.GetConfiguredRelayTimeoutSecs(), + ConfiguredP2PTimeoutSecs: rp.GetConfiguredP2PTimeoutSecs(), + ConfiguredP2PRetryMaxSecs: rp.GetConfiguredP2PRetryMaxSecs(), + Groups: rp.GetGroups(), + LastSeenAtServer: peer.TimestampOrZero(rp.GetLastSeenAtServer()), + LiveOnline: liveOnline, + ServerLivenessKnown: livenessKnown, + }); err != nil { + log.Debugf("UpdatePeerRemoteMeta(%s): %v", pubKey, err) + } + // Transition true->false (under a phase-3.7i+ mgmt that + // authoritatively knows liveness) and we run p2p-dynamic -> + // stop the conn so the lazy mgr re-registers it as Idle. 
+ // This prevents the guard's offer-spam loop and avoids the + // "remote reconnects -> our offer wakes their lazy mgr -> + // instant P2P even without local traffic" symptom. + // + // Codex review: debounce by 5 s. A brief mgmt-reconnect on + // the local end (cellular handover, Doze wakeup) can flip + // liveness false then back to true within the same network- + // map round-trip. Without the grace period we'd tear down + // every peer in those moments. The timer is cancelled in + // the true->true branch below. + if livenessKnown && prevLivenessKnown && prevLiveOnline && !liveOnline && + e.connMgr != nil && e.connMgr.Mode() == connectionmode.ModeP2PDynamic { + e.scheduleRemoteOfflineClose(pubKey) + } + // Transition false->true (or first sighting as online) -> + // cancel any pending offline-close timer for this peer. + if liveOnline && livenessKnown { + e.cancelRemoteOfflineClose(pubKey) + } + } } // must set the exclude list after the peers are added. Without it the manager can not figure out the peers parameters from the store @@ -1528,7 +1790,11 @@ func (e *Engine) addNewPeer(peerConfig *mgmProto.RemotePeerConfig) error { } if exists := e.connMgr.AddPeerConn(e.ctx, peerKey, conn); exists { - conn.Close(false) + // Cleanup of a freshly-created Conn that lost the AddPeerConn + // race -- the OTHER Conn for this peer is now the live one in + // peerStore. The WG peer entry (if any) belongs to that other + // Conn, so this Close must NOT touch it. keepWgPeer=true. 
+ conn.Close(false, true) return fmt.Errorf("peer already exists: %s", peerKey) } @@ -1560,7 +1826,9 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV Addr: e.getRosenpassAddr(), PermissiveMode: e.config.RosenpassPermissive, }, - ICEConfig: e.createICEConfig(), + ICEConfig: e.createICEConfig(), + Mode: e.connMgr.Mode(), + P2pRetryMaxSeconds: e.connMgr.P2pRetryMax(), } serviceDependencies := peer.ServiceDependencies{ @@ -2486,3 +2754,85 @@ func decodeRelayIP(b []byte) netip.Addr { } return ip.Unmap() } + +// remoteOfflineGracePeriod is how long we wait after a peer flips +// from live_online=true to live_online=false before tearing the local +// peer connection down. Absorbs short mgmt-reconnect blips on either +// end (cellular handover, Doze wakeup, brief NAT-rebind). +const remoteOfflineGracePeriod = 5 * time.Second + +// scheduleRemoteOfflineClose arms a timer that will close the local +// peer connection for pubKey after remoteOfflineGracePeriod. If a +// timer is already armed for this peer the call is a no-op (the +// existing one fires on schedule). Idempotent. +func (e *Engine) scheduleRemoteOfflineClose(pubKey string) { + e.peerOfflineDebounceMu.Lock() + defer e.peerOfflineDebounceMu.Unlock() + if _, exists := e.peerOfflineDebounce[pubKey]; exists { + return + } + t := time.AfterFunc(remoteOfflineGracePeriod, func() { + e.peerOfflineDebounceMu.Lock() + delete(e.peerOfflineDebounce, pubKey) + e.peerOfflineDebounceMu.Unlock() + // Codex review: re-validate on fire. Several preconditions + // must still hold: + // 1. engine context not cancelled (Stop() in flight) + // 2. connMgr still in p2p-dynamic mode (mode-switch racing) + // 3. peer still has a peerConn AND status recorder still + // reports the peer as remote-offline (the live state + // could have flipped back without us cancelling — e.g. 
+ // mgmt push for a different peer landed before this fire) + // Without these checks the debounce fires blindly and can + // tear down a perfectly good conn in any of those races. + if e.ctx == nil || e.ctx.Err() != nil { + return + } + if e.connMgr == nil || e.connMgr.Mode() != connectionmode.ModeP2PDynamic { + return + } + if state, err := e.statusRecorder.GetPeer(pubKey); err == nil { + if !state.RemoteServerLivenessKnown || state.RemoteLiveOnline { + return + } + } else { + return + } + conn, ok := e.peerStore.PeerConn(pubKey) + if !ok { + return + } + log.Infof("[peer: %s] remote went offline (debounced %s), closing local conn (p2p-dynamic)", pubKey, remoteOfflineGracePeriod) + // Remote-offline close: keep the WG peer entry so that if the + // remote comes back online and traffic flows, the route-mgr- + // applied AllowedIPs are still in place. The lazy-mgr will + // reactivate the peer through the activity listener. + conn.Close(false, true) + }) + e.peerOfflineDebounce[pubKey] = t +} + +// cancelRemoteOfflineClose stops a pending offline-close timer for +// pubKey if one is armed. Called when the peer flips back to +// live_online=true within the grace window. +func (e *Engine) cancelRemoteOfflineClose(pubKey string) { + e.peerOfflineDebounceMu.Lock() + defer e.peerOfflineDebounceMu.Unlock() + if t, ok := e.peerOfflineDebounce[pubKey]; ok { + t.Stop() + delete(e.peerOfflineDebounce, pubKey) + } +} + +// cancelAllRemoteOfflineCloses stops every pending offline-close +// timer. Called by Engine.Stop() (and on any future mode-switch out of +// p2p-dynamic) so a still-pending timer can't fire after the engine +// has begun shutdown. 
+func (e *Engine) cancelAllRemoteOfflineCloses() { + e.peerOfflineDebounceMu.Lock() + defer e.peerOfflineDebounceMu.Unlock() + for k, t := range e.peerOfflineDebounce { + t.Stop() + delete(e.peerOfflineDebounce, k) + } +} diff --git a/client/internal/engine_offline_debounce_test.go b/client/internal/engine_offline_debounce_test.go new file mode 100644 index 00000000000..082c56b4b67 --- /dev/null +++ b/client/internal/engine_offline_debounce_test.go @@ -0,0 +1,118 @@ +package internal + +import ( + "sync" + "testing" + "time" +) + +// Codex review: the offline-debounce timer can fire after Engine.Stop +// or after a mode-switch. Most of the safety checks live INSIDE the +// time.AfterFunc callback (re-validate ctx / mode / liveness) so a +// pure-unit test of those branches needs the full engine. +// +// The tests below cover the synchronous timer-map operations that DON'T +// require a full engine: scheduleRemoteOfflineClose idempotency, +// cancelRemoteOfflineClose, and cancelAllRemoteOfflineCloses. +// +// The callback re-validation paths are exercised by the end-to-end +// tests on real hardware (see docs/superpowers/test-reports/ +// 2026-05-03-netbird-phase3-7i-end-to-end.md) and protected +// structurally — every guard returns early before touching peerStore. + +// engineForDebounceTest builds an Engine value with just enough state +// to drive the timer-map helpers. The callback's re-validation branches +// won't fire because we never let the timer reach its deadline in +// these unit tests. 
+func engineForDebounceTest() *Engine { + return &Engine{ + peerOfflineDebounce: make(map[string]*time.Timer), + } +} + +func TestScheduleRemoteOfflineClose_StoresTimer(t *testing.T) { + e := engineForDebounceTest() + e.scheduleRemoteOfflineClose("peerA") + e.peerOfflineDebounceMu.Lock() + _, ok := e.peerOfflineDebounce["peerA"] + e.peerOfflineDebounceMu.Unlock() + if !ok { + t.Fatal("schedule must store a timer for the peer") + } + // cleanup so AfterFunc doesn't fire its callback after the test + e.cancelAllRemoteOfflineCloses() +} + +func TestScheduleRemoteOfflineClose_IsIdempotent(t *testing.T) { + e := engineForDebounceTest() + e.scheduleRemoteOfflineClose("peerA") + e.peerOfflineDebounceMu.Lock() + t1 := e.peerOfflineDebounce["peerA"] + e.peerOfflineDebounceMu.Unlock() + + e.scheduleRemoteOfflineClose("peerA") // second call + + e.peerOfflineDebounceMu.Lock() + t2 := e.peerOfflineDebounce["peerA"] + e.peerOfflineDebounceMu.Unlock() + + if t1 != t2 { + t.Error("second schedule for same peer must not replace the existing timer") + } + e.cancelAllRemoteOfflineCloses() +} + +func TestCancelRemoteOfflineClose_RemovesEntry(t *testing.T) { + e := engineForDebounceTest() + e.scheduleRemoteOfflineClose("peerA") + e.cancelRemoteOfflineClose("peerA") + e.peerOfflineDebounceMu.Lock() + _, ok := e.peerOfflineDebounce["peerA"] + e.peerOfflineDebounceMu.Unlock() + if ok { + t.Fatal("cancel must remove the peer from the timer map") + } +} + +func TestCancelRemoteOfflineClose_OnAbsentPeer_NoOp(t *testing.T) { + e := engineForDebounceTest() + // must not panic + e.cancelRemoteOfflineClose("never-scheduled") + if len(e.peerOfflineDebounce) != 0 { + t.Error("map must remain empty") + } +} + +func TestCancelAllRemoteOfflineCloses_ClearsEverything(t *testing.T) { + e := engineForDebounceTest() + for _, k := range []string{"a", "b", "c", "d"} { + e.scheduleRemoteOfflineClose(k) + } + if len(e.peerOfflineDebounce) != 4 { + t.Fatalf("setup: expected 4 timers, got %d", 
len(e.peerOfflineDebounce)) + } + e.cancelAllRemoteOfflineCloses() + if len(e.peerOfflineDebounce) != 0 { + t.Errorf("cancel-all must clear the map, got %d entries", len(e.peerOfflineDebounce)) + } +} + +// Stress: schedule + cancel from multiple goroutines concurrently. +// Mutex must keep the map consistent. +func TestRemoteOfflineDebounce_ConcurrentSafe(t *testing.T) { + e := engineForDebounceTest() + var wg sync.WaitGroup + for i := 0; i < 50; i++ { + wg.Add(2) + go func(i int) { + defer wg.Done() + e.scheduleRemoteOfflineClose("peer" + string(rune('A'+i%5))) + }(i) + go func(i int) { + defer wg.Done() + e.cancelRemoteOfflineClose("peer" + string(rune('A'+i%5))) + }(i) + } + wg.Wait() + e.cancelAllRemoteOfflineCloses() +} diff --git a/client/internal/engine_pusher_adapters.go b/client/internal/engine_pusher_adapters.go new file mode 100644 index 00000000000..bbca5397ee1 --- /dev/null +++ b/client/internal/engine_pusher_adapters.go @@ -0,0 +1,62 @@ +package internal + +import ( + "context" + + "github.com/netbirdio/netbird/client/internal/peer" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// enginePushSink bridges the Engine's mgmClient to the PushSink interface +// consumed by connStatePusher. Phase 3.7i of #5989. +type enginePushSink struct{ engine *Engine } + +func (s *enginePushSink) Push(ctx context.Context, m *mgmProto.PeerConnectionMap) error { + return s.engine.mgmClient.SyncPeerConnections(ctx, m) +} + +// enginePeerStateSource bridges the Engine's statusRecorder to the +// PeerStateSource interface consumed by connStatePusher. Phase 3.7i of #5989. 
+type enginePeerStateSource struct{ engine *Engine } + +func (s *enginePeerStateSource) SnapshotAllRemotePeers() []PeerStateChangeEvent { + fs := s.engine.statusRecorder.GetFullStatus() + out := make([]PeerStateChangeEvent, 0, len(fs.Peers)) + for _, st := range fs.Peers { + out = append(out, peerStateToEvent(st.PubKey, st)) + } + return out +} + +// peerStateToEvent converts a peer.State to a PeerStateChangeEvent suitable +// for the connStatePusher. The Endpoint field is set to +// "local ↔ remote" when both ICE candidate endpoints are known. +func peerStateToEvent(pubkey string, st peer.State) PeerStateChangeEvent { + var ct mgmProto.ConnType + switch { + case st.ConnStatus == peer.StatusConnected && !st.Relayed: + ct = mgmProto.ConnType_CONN_TYPE_P2P + case st.ConnStatus == peer.StatusConnected && st.Relayed: + ct = mgmProto.ConnType_CONN_TYPE_RELAYED + case st.ConnStatus == peer.StatusConnecting: + ct = mgmProto.ConnType_CONN_TYPE_CONNECTING + default: + ct = mgmProto.ConnType_CONN_TYPE_IDLE + } + + endpoint := st.LocalIceCandidateEndpoint + if endpoint != "" && st.RemoteIceCandidateEndpoint != "" { + endpoint = st.LocalIceCandidateEndpoint + " <-> " + st.RemoteIceCandidateEndpoint + } + + return PeerStateChangeEvent{ + Pubkey: pubkey, + ConnType: ct, + LastHandshake: st.LastWireguardHandshake, + LatencyMS: uint32(st.Latency.Milliseconds()), + Endpoint: endpoint, + RelayServer: st.RelayServerAddress, + RxBytes: uint64(st.BytesRx), + TxBytes: uint64(st.BytesTx), + } +} diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index f4c5be70a52..98730bc4aba 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -1671,7 +1671,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, 
networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil, nil, nil) if err != nil { return nil, "", err } diff --git a/client/internal/lazyconn/activity/listener_bind_test.go b/client/internal/lazyconn/activity/listener_bind_test.go index 1baaae6bef9..c149f9891d5 100644 --- a/client/internal/lazyconn/activity/listener_bind_test.go +++ b/client/internal/lazyconn/activity/listener_bind_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "github.com/netbirdio/netbird/client/iface/bind" "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/iface/wgaddr" "github.com/netbirdio/netbird/client/internal/lazyconn" @@ -40,6 +41,14 @@ func (m *mockEndpointManager) GetEndpoint(fakeIP netip.Addr) net.Conn { return m.endpoints[fakeIP] } +// ActivityRecorder satisfies the device.EndpointManager interface. +// The mock returns nil (no recorder) — sufficient for listener_bind +// tests which exercise endpoint dispatch only and never invoke the +// recorder's OnActivity callback. Phase 3.7i, Codex review 2026-05-05. +func (m *mockEndpointManager) ActivityRecorder() *bind.ActivityRecorder { + return nil +} + // MockWGIfaceBind mocks WgInterface with bind support type MockWGIfaceBind struct { endpointMgr *mockEndpointManager diff --git a/client/internal/lazyconn/env.go b/client/internal/lazyconn/env.go index 649d1cd65de..cfdcc67d61d 100644 --- a/client/internal/lazyconn/env.go +++ b/client/internal/lazyconn/env.go @@ -12,6 +12,11 @@ const ( EnvInactivityThreshold = "NB_LAZY_CONN_INACTIVITY_THRESHOLD" ) +// IsLazyConnEnabledByEnv reads NB_ENABLE_EXPERIMENTAL_LAZY_CONN. 
+// +// Deprecated: use peer.ResolveModeFromEnv() -- kept here to not break +// existing callers in conn_mgr.go during the Phase-1 refactor; will be +// removed once all call sites use the new resolver. func IsLazyConnEnabledByEnv() bool { val := os.Getenv(EnvEnableLazyConn) if val == "" { diff --git a/client/internal/lazyconn/inactivity/manager.go b/client/internal/lazyconn/inactivity/manager.go index 0120f443049..3758eb0869b 100644 --- a/client/internal/lazyconn/inactivity/manager.go +++ b/client/internal/lazyconn/inactivity/manager.go @@ -14,6 +14,14 @@ import ( const ( checkInterval = 1 * time.Minute + // DefaultInactivityThreshold is the relay-tunnel idle-teardown + // fallback when neither client config nor server-pushed value sets + // it. Reverted 2026-05-07 to the original 15 min value (was bumped + // to 24 h locally during 2026-05-03 testing, but that change is not + // in scope for this PR — keeping existing p2p-lazy semantics intact + // is required so p2p-dynamic is the only mode whose lifecycle + // changes. p2p-dynamic users that want a longer warm window can + // override via per-peer relay_timeout_seconds. DefaultInactivityThreshold = 15 * time.Minute MinimumInactivityThreshold = 1 * time.Minute ) @@ -22,30 +30,89 @@ type WgInterface interface { LastActivities() map[string]monotime.Time } +// Manager watches per-peer activity timestamps from the WireGuard +// interface and notifies via channels when peers cross inactivity +// thresholds. +// +// Phase 2 (#5989) introduced TWO independent thresholds per peer: +// - iceTimeout fires the iceInactiveChan (consumer detaches the ICE +// worker but keeps the relay-tunnel up). +// - relayTimeout fires the relayInactiveChan (consumer tears down +// the whole connection). +// +// Threshold == 0 disables that channel for all peers (the corresponding +// teardown never fires). 
Phase-1 p2p-lazy is expressed as +// iceTimeout=0 + relayTimeout=X; the legacy InactivePeersChan is the +// same as RelayInactiveChan for backwards compat. type Manager struct { - inactivePeersChan chan map[string]struct{} + iface WgInterface - iface WgInterface - interestedPeers map[string]*lazyconn.PeerConfig + // Two-timer thresholds (Phase 2). Both 0 = manager is effectively + // inert (peers register but no channel ever fires). + iceTimeout time.Duration + relayTimeout time.Duration + + interestedPeers map[string]*lazyconn.PeerConfig + + iceInactiveChan chan map[string]struct{} + relayInactiveChan chan map[string]struct{} + + // inactivityThreshold + inactivePeersChan are kept for the + // Phase-1 NewManager API. Internally they alias to the relay + // timeout / channel. inactivityThreshold time.Duration + inactivePeersChan chan map[string]struct{} } +// NewManager is the Phase-1 single-timer constructor. Pass a *time.Duration +// to override the default DefaultInactivityThreshold; nil uses the default. +// +// Deprecated: use NewManagerWithTwoTimers. NewManager remains the entry +// point for callers that haven't been migrated; it constructs a manager +// with iceTimeout=0 (= ICE always-on, p2p-lazy semantics). func NewManager(iface WgInterface, configuredThreshold *time.Duration) *Manager { - inactivityThreshold, err := validateInactivityThreshold(configuredThreshold) + threshold, err := validateInactivityThreshold(configuredThreshold) if err != nil { - inactivityThreshold = DefaultInactivityThreshold + threshold = DefaultInactivityThreshold log.Warnf("invalid inactivity threshold configured: %v, using default: %v", err, DefaultInactivityThreshold) } - log.Infof("inactivity threshold configured: %v", inactivityThreshold) + log.Infof("inactivity threshold configured: %v", threshold) + return newManager(iface, 0, threshold) +} + +// NewManagerWithTwoTimers is the Phase-2 constructor. Pass 0 for either +// timeout to disable that teardown path. 
Both 0 leaves the manager +// running but inert (no channel ever fires) -- used by p2p / relay-forced +// modes that don't tear down workers. +func NewManagerWithTwoTimers(iface WgInterface, iceTimeout, relayTimeout time.Duration) *Manager { + if iceTimeout > 0 { + log.Infof("ICE inactivity timeout: %v", iceTimeout) + } + if relayTimeout > 0 { + log.Infof("relay inactivity timeout: %v", relayTimeout) + } + return newManager(iface, iceTimeout, relayTimeout) +} + +func newManager(iface WgInterface, iceTimeout, relayTimeout time.Duration) *Manager { + relayCh := make(chan map[string]struct{}, 1) return &Manager{ - inactivePeersChan: make(chan map[string]struct{}, 1), iface: iface, + iceTimeout: iceTimeout, + relayTimeout: relayTimeout, interestedPeers: make(map[string]*lazyconn.PeerConfig), - inactivityThreshold: inactivityThreshold, + iceInactiveChan: make(chan map[string]struct{}, 1), + relayInactiveChan: relayCh, + inactivityThreshold: relayTimeout, + inactivePeersChan: relayCh, // Phase-1 alias: same channel as relayInactiveChan } } +// InactivePeersChan is the Phase-1 channel for whole-tunnel teardown. +// In the Phase-2 internal model this is the same channel as +// RelayInactiveChan -- existing callers (engine.go p2p-lazy path) keep +// working unchanged. func (m *Manager) InactivePeersChan() chan map[string]struct{} { if m == nil { // return a nil channel that blocks forever @@ -55,6 +122,26 @@ func (m *Manager) InactivePeersChan() chan map[string]struct{} { return m.inactivePeersChan } +// ICEInactiveChan returns the channel that signals ICE-worker-only +// inactivity per peer (consumer typically calls Conn.DetachICE). +// Always returns a valid channel; if iceTimeout is 0, the channel +// just never fires. +func (m *Manager) ICEInactiveChan() chan map[string]struct{} { + if m == nil { + return nil + } + return m.iceInactiveChan +} + +// RelayInactiveChan returns the channel that signals relay-worker +// (and thus whole-tunnel) inactivity per peer. 
+func (m *Manager) RelayInactiveChan() chan map[string]struct{} { + if m == nil { + return nil + } + return m.relayInactiveChan +} + func (m *Manager) AddPeer(peerCfg *lazyconn.PeerConfig) { if m == nil { return @@ -95,24 +182,25 @@ func (m *Manager) Start(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C(): - idlePeers, err := m.checkStats() + iceIdle, relayIdle, err := m.checkStats() if err != nil { log.Errorf("error checking stats: %v", err) return } - if len(idlePeers) == 0 { - continue + if len(iceIdle) > 0 { + m.notifyChan(ctx, m.iceInactiveChan, iceIdle) + } + if len(relayIdle) > 0 { + m.notifyChan(ctx, m.relayInactiveChan, relayIdle) } - - m.notifyInactivePeers(ctx, idlePeers) } } } -func (m *Manager) notifyInactivePeers(ctx context.Context, inactivePeers map[string]struct{}) { +func (m *Manager) notifyChan(ctx context.Context, ch chan map[string]struct{}, peers map[string]struct{}) { select { - case m.inactivePeersChan <- inactivePeers: + case ch <- peers: case <-ctx.Done(): return default: @@ -120,10 +208,24 @@ func (m *Manager) notifyInactivePeers(ctx context.Context, inactivePeers map[str } } -func (m *Manager) checkStats() (map[string]struct{}, error) { +// checkStats walks the per-peer activity-since values and groups peers +// into two sets: +// - iceIdle: peers idle longer than iceTimeout (only populated when +// iceTimeout > 0; otherwise this set is always empty) +// - relayIdle: peers idle longer than relayTimeout (only populated +// when relayTimeout > 0) +// +// Both sets are returned independently so consumers can act on each +// without coupling. A peer that has crossed both thresholds appears in +// both sets and the consumer is expected to handle them in order +// (first DetachICE on the iceIdle set, then full Close on the relayIdle +// set; the order is fine because Close on a peer where ICE is already +// detached is still correct). 
+func (m *Manager) checkStats() (iceIdle, relayIdle map[string]struct{}, err error) { lastActivities := m.iface.LastActivities() - idlePeers := make(map[string]struct{}) + iceIdle = make(map[string]struct{}) + relayIdle = make(map[string]struct{}) checkTime := time.Now() for peerID, peerCfg := range m.interestedPeers { @@ -135,13 +237,18 @@ func (m *Manager) checkStats() (map[string]struct{}, error) { } since := monotime.Since(lastActive) - if since > m.inactivityThreshold { - peerCfg.Log.Infof("peer is inactive since time: %s", checkTime.Add(-since).String()) - idlePeers[peerID] = struct{}{} + + if m.iceTimeout > 0 && since > m.iceTimeout { + peerCfg.Log.Debugf("peer ICE idle since: %s", checkTime.Add(-since).String()) + iceIdle[peerID] = struct{}{} + } + if m.relayTimeout > 0 && since > m.relayTimeout { + peerCfg.Log.Infof("peer relay idle since: %s", checkTime.Add(-since).String()) + relayIdle[peerID] = struct{}{} } } - return idlePeers, nil + return iceIdle, relayIdle, nil } func validateInactivityThreshold(configuredThreshold *time.Duration) (time.Duration, error) { diff --git a/client/internal/lazyconn/inactivity/manager_test.go b/client/internal/lazyconn/inactivity/manager_test.go index 10b4ef1ebb4..db3e648867c 100644 --- a/client/internal/lazyconn/inactivity/manager_test.go +++ b/client/internal/lazyconn/inactivity/manager_test.go @@ -23,9 +23,11 @@ func (m *mockWgInterface) LastActivities() map[string]monotime.Time { func TestPeerTriggersInactivity(t *testing.T) { peerID := "peer1" + // Past activity must exceed DefaultInactivityThreshold (24 h after + // the Phase-3.7i tuning) — pick 25 h for safety margin. 
wgMock := &mockWgInterface{ lastActivities: map[string]monotime.Time{ - peerID: monotime.Time(int64(monotime.Now()) - int64(20*time.Minute)), + peerID: monotime.Time(int64(monotime.Now()) - int64(25*time.Hour)), }, } @@ -112,3 +114,263 @@ func (f *fakeTickerMock) C() <-chan time.Time { } func (f *fakeTickerMock) Stop() {} + +// --- Phase 2 (#5989) two-timer tests --- + +// makePeerCfg is a test helper for building a minimal PeerConfig with logger. +func makePeerCfg(peerID string) *lazyconn.PeerConfig { + return &lazyconn.PeerConfig{ + PublicKey: peerID, + Log: log.WithField("peer", peerID), + } +} + +// pastActivity returns a monotime.Time corresponding to (now - d). +func pastActivity(d time.Duration) monotime.Time { + return monotime.Time(int64(monotime.Now()) - int64(d)) +} + +func TestTwoTimers_OnlyICEFires(t *testing.T) { + peerID := "peer1" + + // Peer idle for 6 minutes: above iceTimeout (5m), below relayTimeout (24h). + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(6 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 24*time.Hour) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.ICEInactiveChan(): + assert.Contains(t, peers, peerID, "expected peerID on ICE channel") + case <-time.After(1 * time.Second): + t.Fatal("expected ICE-inactive event, none received") + } + + // Relay channel must NOT fire. 
+ select { + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel should not fire when only iceTimeout exceeded") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_BothFire(t *testing.T) { + peerID := "peer1" + + // Peer idle for 25h: above both iceTimeout (5m) and relayTimeout (24h). + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(25 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 24*time.Hour) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + gotICE := false + gotRelay := false + deadline := time.After(1 * time.Second) + for !gotICE || !gotRelay { + select { + case peers := <-manager.ICEInactiveChan(): + if _, ok := peers[peerID]; ok { + gotICE = true + } + case peers := <-manager.RelayInactiveChan(): + if _, ok := peers[peerID]; ok { + gotRelay = true + } + case <-deadline: + t.Fatalf("timeout waiting for both channels (gotICE=%v, gotRelay=%v)", gotICE, gotRelay) + } + } +} + +func TestTwoTimers_ICEDisabled(t *testing.T) { + peerID := "peer1" + + // iceTimeout=0 (disabled) + relayTimeout=10m, peer idle 11m -> only relay fires. 
+ wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(11 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 0, 10*time.Minute) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.RelayInactiveChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("relay channel should fire when relayTimeout exceeded") + } + + // ICE channel must never fire because iceTimeout=0. + select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel should NEVER fire when iceTimeout=0") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_RelayDisabled(t *testing.T) { + peerID := "peer1" + + // iceTimeout=5m + relayTimeout=0, peer idle 6m -> only ICE fires. + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(6 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 0) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.ICEInactiveChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("ICE channel should fire when iceTimeout exceeded") + } + + // Relay channel must never fire because relayTimeout=0. 
+ select { + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel should NEVER fire when relayTimeout=0") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_BothDisabled(t *testing.T) { + peerID := "peer1" + + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(99 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 0, 0) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + // Neither channel should fire. + select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel must not fire when both disabled") + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel must not fire when both disabled") + case <-time.After(300 * time.Millisecond): + // expected + } +} + +// TestPhase1_LazyEquivalence verifies that the legacy NewManager constructor +// behaves identically to the Phase-1 single-timer code: peers cross the +// (single) inactivityThreshold and appear on InactivePeersChan, ICE +// channel never fires. +func TestPhase1_LazyEquivalence(t *testing.T) { + peerID := "peer1" + + // DefaultInactivityThreshold is 24 h (Phase-3.7i tuning); use 25 h + // of past activity so the test is robust to that constant changing + // in either direction. + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(25 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + // Phase-1 entry point with default threshold. 
+ manager := NewManager(wgMock, nil) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + // InactivePeersChan (Phase-1 alias of RelayInactiveChan) must fire. + select { + case peers := <-manager.InactivePeersChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("Phase-1 InactivePeersChan must fire (= RelayInactiveChan in Phase 2)") + } + + // ICE channel must NEVER fire from Phase-1 entry point (iceTimeout=0). + select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel must not fire in Phase-1 NewManager mode") + case <-time.After(200 * time.Millisecond): + // expected + } +} diff --git a/client/internal/lazyconn/manager/manager.go b/client/internal/lazyconn/manager/manager.go index fc47bda39d5..2ca2f0fe4d3 100644 --- a/client/internal/lazyconn/manager/manager.go +++ b/client/internal/lazyconn/manager/manager.go @@ -28,7 +28,31 @@ type managedPeer struct { } type Config struct { + // Phase-1 single-timer field. Deprecated: use ICEInactivityThreshold + // and RelayInactivityThreshold instead. Kept so existing callers + // (engine.go) compile during the Phase-2 transition; internally + // treated as RelayInactivityThreshold when the new fields are zero. InactivityThreshold *time.Duration + + // ICEInactivityThreshold is the per-peer ICE-worker idle timeout + // (Phase 2 / #5989). 0 = ICE always-on (= p2p-lazy semantics, where + // the whole tunnel goes idle but ICE is never torn down separately). + ICEInactivityThreshold time.Duration + + // RelayInactivityThreshold is the per-peer relay-worker idle timeout + // (Phase 2). 0 = relay always-on. + RelayInactivityThreshold time.Duration +} + +// resolvedTimeouts returns the effective (ICE, Relay) timeouts. If only +// the deprecated InactivityThreshold field is set, it maps onto the +// relay timeout for Phase-1 p2p-lazy semantics. 
+func (c Config) resolvedTimeouts() (iceTimeout, relayTimeout time.Duration) { + relay := c.RelayInactivityThreshold + if relay == 0 && c.InactivityThreshold != nil { + relay = *c.InactivityThreshold + } + return c.ICEInactivityThreshold, relay } // Manager manages lazy connections @@ -76,7 +100,13 @@ func NewManager(config Config, engineCtx context.Context, peerStore *peerstore.S } if wgIface.IsUserspaceBind() { - m.inactivityManager = inactivity.NewManager(wgIface, config.InactivityThreshold) + iceTO, relayTO := config.resolvedTimeouts() + if iceTO == 0 && relayTO == 0 { + // Phase 1 / single-timer fallback when caller hasn't migrated. + m.inactivityManager = inactivity.NewManager(wgIface, config.InactivityThreshold) //nolint:staticcheck // intentional Phase-1 single-timer fallback + } else { + m.inactivityManager = inactivity.NewManagerWithTwoTimers(wgIface, iceTO, relayTO) + } } else { log.Warnf("inactivity manager not supported for kernel mode, wait for remote peer to close the connection") } @@ -84,6 +114,18 @@ func NewManager(config Config, engineCtx context.Context, peerStore *peerstore.S return m } +// InactivityManager exposes the underlying inactivity.Manager so the +// engine / conn_mgr can subscribe to ICEInactiveChan / RelayInactiveChan +// in the p2p-dynamic mode lifecycle. Returns nil if the manager runs in +// kernel-bind mode (no inactivity tracking) or if the manager itself is +// nil (defensive). 
+func (m *Manager) InactivityManager() *inactivity.Manager { + if m == nil { + return nil + } + return m.inactivityManager +} + // UpdateRouteHAMap updates the HA group mappings for routes // This should be called when route configuration changes func (m *Manager) UpdateRouteHAMap(haMap route.HAMap) { @@ -537,6 +579,50 @@ func (m *Manager) onPeerActivity(peerConnID peerid.ConnID) { m.activateHAGroupPeers(mp.peerCfg) m.peerStore.PeerConnOpen(m.engineCtx, mp.peerCfg.PublicKey) + + // Phase 3.7i (#5989): the signal-trigger and activity-trigger paths + // must be symmetric. Signal-trigger goes through + // ConnMgr.ActivatePeer which calls conn.AttachICE for p2p-dynamic. + // Activity-trigger here previously went through PeerConnOpen only + // — Open() recreates workerICE but does NOT register the ICE + // listener on the handshaker (deferICEListener=true for + // p2p-dynamic). Without AttachICE the guard's onGuardEvent then + // sees readICEListener()==nil + everConnected==true and skips + // every offer with "will re-attach on real traffic" — but the + // only re-attach path is here, so we'd loop forever. + // + // AttachICE is mode-safe: a no-op for ModeP2P / ModeP2PLazy + // (listener already attached via Open) and an error for + // ModeRelayForced (workerICE nil) which we ignore. + // + // Reset iceBackoff first (Codex review 2026-05-05): the lazy-mgr + // activity-listener fires on a >32-byte type-4 outbound packet to + // a peer that's been fully Idle (Open=false, conn closed by relay- + // timeout). That's the strongest possible "user wants to talk to + // this peer" signal -- much stronger than the existing 3-tries-then- + // hourly retry policy. Without the reset, a previous transient + // ICE failure would keep the conn relay-only for an hour even + // after explicit user activity. 
We reset ONLY here in the lazy-mgr + // activity path; the relay-state activity path (engine wires + // ActivityRecorder.OnActivity -> Conn.AttachICEOnRelayActivity) does + // NOT reset, deliberately respecting the failure backoff because + // every relay payload packet would otherwise reset it. + if conn, ok := m.peerStore.PeerConn(mp.peerCfg.PublicKey); ok { + conn.ResetIceBackoff() + if err := conn.AttachICE(); err != nil { + mp.peerCfg.Log.Warnf("AttachICE on activity wake: %v", err) + } + // Phase 3.7i (#5989), Codex review 2026-05-05: also reset the + // guard's per-cycle ICE retry budget. After C->A Idle wake the + // Conn (and its guard) is freshly created, but the 3-retries- + // then-hourly counter is shared across the whole reconnect + // cycle. For peers with non-LAN candidates a single fresh + // pair-check cycle often needs all 3 tries (cold srflx + // mappings), and without an activity-driven reset the next + // real user packet would already find the guard in hourly + // mode -- defeating p2p-dynamic's "fast P2P recovery" promise. + conn.NotifyGuardActivity() + } } func (m *Manager) onPeerInactivityTimedOut(peerIDs map[string]struct{}) { diff --git a/client/internal/lazyconn/support.go b/client/internal/lazyconn/support.go index 5e765c2d6f4..1f9927d3864 100644 --- a/client/internal/lazyconn/support.go +++ b/client/internal/lazyconn/support.go @@ -15,6 +15,19 @@ func IsSupported(agentVersion string) bool { return true } + // Custom dev/CI builds with explicit prefix or embedded marker: + // "dev-089a95a", "ci-abcdef" (bare prefix form) + // "0.0.0-dev-1b923aad9", "0.0.0-ci-…" (semver-padded form used by + // build-android-lib.sh so + // version.NewVersion can parse) + // All come from the same source tree as the "development" build + // above; assume they support lazy. Only the random short-hash form + // (e.g. "a6c5960") lacks any prefix signal. 
+ if strings.HasPrefix(agentVersion, "dev-") || strings.HasPrefix(agentVersion, "ci-") || + strings.Contains(agentVersion, "-dev-") || strings.Contains(agentVersion, "-ci-") { + return true + } + // filter out versions like this: a6c5960, a7d5c522, d47be154 if !strings.Contains(agentVersion, ".") { return false diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 1e416bfe707..899048f3bba 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -7,6 +7,7 @@ import ( "net/netip" "runtime" "sync" + "sync/atomic" "time" "github.com/pion/ice/v4" @@ -16,6 +17,7 @@ import ( "github.com/netbirdio/netbird/client/iface/configurer" "github.com/netbirdio/netbird/client/iface/wgproxy" "github.com/netbirdio/netbird/client/internal/metrics" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/internal/peer/conntype" "github.com/netbirdio/netbird/client/internal/peer/dispatcher" "github.com/netbirdio/netbird/client/internal/peer/guard" @@ -86,11 +88,24 @@ type ConnConfig struct { // ICEConfig ICE protocol configuration ICEConfig icemaker.Config + + // Mode is the resolved connection mode for this peer (forwarded + // from the engine, which got it from the conn_mgr precedence chain). + // Phase 1 uses it to pick the skip-ICE branch when ModeRelayForced. + Mode connectionmode.Mode + + // P2pRetryMaxSeconds is the cap for the ICE-failure backoff schedule + // in p2p-dynamic mode. 0 = use built-in default (DefaultP2PRetryMax). + // Wire-format sentinel uint32-max (= ^uint32(0)) means "user-explicit + // disable", which the resolver translates to time.Duration(0) at + // engine.go before passing it here. Phase 3 of #5989. 
+ P2pRetryMaxSeconds uint32 } type Conn struct { Log *log.Entry mu sync.Mutex + iceBackoff *iceBackoffState ctx context.Context ctxCancel context.CancelFunc config ConnConfig @@ -104,11 +119,30 @@ type Conn struct { onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) onDisconnected func(remotePeer string) rosenpassInitializedPresharedKeyValidator func(peerKey string) bool + // onWGTimeoutRecover, when set, is invoked from onWGDisconnected + // after the active worker has been closed. The handler should put + // this peer back into the lazy manager's idle/activity-listening + // state so the next outbound packet re-triggers the lazy mgr (and + // re-attaches ICE/relay). Without this hook the peer was stuck in + // "Connecting" forever after a WireGuard handshake timeout — the + // lazy mgr kept the peer in its "active" set with no activity + // listener, so local traffic was silently dropped. Codex follow-up + // to the 6-host hardware test on c9a47ed90. + onWGTimeoutRecover func() statusRelay *worker.AtomicWorkerStatus statusICE *worker.AtomicWorkerStatus currentConnPriority conntype.ConnPriority opened bool // this flag is used to prevent close in case of not opened connection + // everConnected is set to true the first time configureConnection + // or relay-only setup transitions this peer into a non-None + // priority. Codex follow-up: distinguishes the "ICE detached for + // inactivity" case (skip guard offer to avoid spam) from the + // "never connected yet" case (must send the bootstrap offer). + // Without this, the guard's first fire after lazy-mgr activity + // would incorrectly skip the initial offer because no ICE + // listener is attached YET. 
+ everConnected atomic.Bool workerICE *WorkerICE workerRelay *WorkerRelay @@ -185,8 +219,24 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager) - forceRelay := IsForceRelayed() - if !forceRelay { + // Phase 3: initialize per-peer ICE-failure backoff. The cap comes + // from the resolved P2pRetryMaxSeconds. 0 means "use built-in default". + backoffCap := time.Duration(conn.config.P2pRetryMaxSeconds) * time.Second + if backoffCap == 0 { + backoffCap = DefaultP2PRetryMax + } + if conn.iceBackoff == nil { + conn.iceBackoff = newIceBackoff(backoffCap) + } else { + conn.iceBackoff.SetMaxBackoff(backoffCap) + } + + // Mode-driven branching. ModeRelayForced skips ICE entirely; all + // other modes (P2P, P2PLazy, P2PDynamic) construct workerICE + // eagerly in Phase 1. Phase 2 will branch P2PDynamic separately + // to defer the OnNewOffer registration. + skipICE := conn.config.Mode == connectionmode.ModeRelayForced + if !skipICE { relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) if err != nil { @@ -198,11 +248,25 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay, conn.metricsStages) conn.handshaker.AddRelayListener(conn.workerRelay.OnNewOffer) - if !forceRelay { + + // ICE-listener registration depends on mode: + // - ModeRelayForced: skipICE=true, no workerICE, no listener. + // - ModeP2P, ModeP2PLazy: workerICE constructed, listener registered eagerly. + // P2PLazy's whole-tunnel deferral happens at the conn_mgr level, not here. + // - ModeP2PDynamic: workerICE constructed eagerly so it's ready, but the + // listener registration is deferred. 
The inactivity manager calls + // Conn.AttachICE() once activity is observed on the relay tunnel. + deferICEListener := conn.config.Mode == connectionmode.ModeP2PDynamic + if !skipICE && !deferICEListener { conn.handshaker.AddICEListener(conn.workerICE.OnNewOffer) } conn.guard = guard.NewGuard(conn.Log, conn.isConnectedOnAllWay, conn.config.Timeout, conn.srWatcher) + // Phase 3.5 (#5989): reset ICE backoff + recreate workerICE on network change. + // Set before Start() is called so the goroutine sees it without races. + if !skipICE { + conn.guard.SetOnNetworkChange(conn.onNetworkChange) + } conn.wg.Add(1) go func() { @@ -230,8 +294,22 @@ func (conn *Conn) Open(engineCtx context.Context) error { return nil } -// Close closes this peer Conn issuing a close event to the Conn closeCh -func (conn *Conn) Close(signalToRemote bool) { +// Close closes this peer Conn issuing a close event to the Conn closeCh. +// +// keepWgPeer controls whether the WireGuard peer entry is removed at +// the iface layer. Pass true on the lazy-suspend path +// (lazy-mgr deactivate, WG-timeout-recover) so that routed-subnet +// AllowedIPs the route-manager appended remain intact -- otherwise the +// peer goes Idle, comes back via the activity listener, and routed +// traffic to the peer's advertised subnets is silently dropped until +// the next mgmt-side reconcile re-attaches them. Pass false on the +// permanent-removal path (engine.removePeer, mode-change tear-down) +// where the peer should disappear from the WG iface entirely. +// +// See docs/bugs/2026-05-04-lazy-wake-on-routed-subnet.md for the full +// mechanism analysis. Regression tests live in +// conn_lazy_keepwgpeer_test.go. 
+func (conn *Conn) Close(signalToRemote bool, keepWgPeer bool) { conn.mu.Lock() defer conn.wgWatcherWg.Wait() defer conn.mu.Unlock() @@ -247,7 +325,7 @@ func (conn *Conn) Close(signalToRemote bool) { } } - conn.Log.Infof("close peer connection") + conn.Log.Infof("close peer connection (keepWgPeer=%v)", keepWgPeer) conn.ctxCancel() if conn.wgWatcherCancel != nil { @@ -274,8 +352,17 @@ func (conn *Conn) Close(signalToRemote bool) { conn.wgProxyICE = nil } - if err := conn.endpointUpdater.RemoveWgPeer(); err != nil { - conn.Log.Errorf("failed to remove wg endpoint: %v", err) + if !keepWgPeer { + if err := conn.endpointUpdater.RemoveWgPeer(); err != nil { + conn.Log.Errorf("failed to remove wg endpoint: %v", err) + } + } else { + // Lazy-suspend: keep the WG peer entry so route-manager-applied + // AllowedIPs (advertised subnets) survive the wake/sleep cycle. + // The lazy listener that runs next will UpdatePeer in-place to + // switch the endpoint to its fake 127.2.x.y target -- the + // AllowedIPs (peer-IP /32 + routed prefixes) stay intact. + conn.Log.Debugf("keeping WG peer entry across lazy-suspend so routed-subnet AllowedIPs survive") } if conn.evalStatus() == StatusConnected && conn.onDisconnected != nil { @@ -314,6 +401,14 @@ func (conn *Conn) SetOnDisconnected(handler func(remotePeer string)) { conn.onDisconnected = handler } +// SetOnWGTimeoutRecover wires the lazy-mgr recovery callback. ConnMgr +// installs this so that a WG-handshake-timeout pushes the peer back +// into the activity-listening idle state. See onWGTimeoutRecover docs +// on the Conn struct for the full rationale. +func (conn *Conn) SetOnWGTimeoutRecover(handler func()) { + conn.onWGTimeoutRecover = handler +} + // SetRosenpassInitializedPresharedKeyValidator sets a function to check if Rosenpass has taken over // PSK management for a peer. When this returns true, presharedKey() returns nil // to prevent UpdatePeer from overwriting the Rosenpass-managed PSK. 
@@ -398,10 +493,11 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn ep = directEp } - if conn.wgProxyRelay != nil { - conn.wgProxyRelay.Pause() - } - + // Bring the new ICE proxy up FIRST so the destination is ready to + // receive packets. Then update WG to use it. Only after WG has + // committed to the new endpoint do we pause the relay -- otherwise + // there is a 1-2 s window where relay is suspended but WG still + // points at it, dropping every packet in that window. if wgProxy != nil { wgProxy.Work() } @@ -420,9 +516,14 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn if conn.wgProxyRelay != nil { conn.Log.Debugf("redirect packets from relayed conn to WireGuard") conn.wgProxyRelay.RedirectAs(ep) + // Pause AFTER the redirect is wired up so any in-flight packet + // from the relay end has a forwarding path while WG converges + // onto the direct endpoint. + conn.wgProxyRelay.Pause() } conn.currentConnPriority = priority + conn.everConnected.Store(true) conn.statusICE.SetConnected() conn.updateIceState(iceConnInfo, updateTime) conn.doOnConnected(iceConnInfo.RosenpassPubKey, iceConnInfo.RosenpassAddr, updateTime) @@ -464,9 +565,14 @@ func (conn *Conn) onICEStateDisconnected(sessionChanged bool) { } else { conn.Log.Infof("ICE disconnected, do not switch to Relay. Reset priority to: %s", conntype.None.String()) conn.currentConnPriority = conntype.None - if err := conn.config.WgConfig.WgInterface.RemoveEndpointAddress(conn.config.WgConfig.RemoteKey); err != nil { - conn.Log.Errorf("failed to remove wg endpoint: %v", err) - } + // Intentionally NOT calling RemoveEndpointAddress here: a brief + // ICE flap (NAT rebind, signal hiccup) is followed within 1-2 s + // by a fresh ICE-connected callback that re-configures the WG + // endpoint. 
Actively removing the endpoint creates a no-endpoint + // window in which WG drops every packet rather than queuing on + // a slightly-stale address that the next ConfigureWGEndpoint + // will replace anyway. If the disconnect is permanent, WG's own + // keepalive timeout will surface the dead peer. } changed := conn.statusICE.Get() != worker.StatusDisconnected @@ -547,6 +653,7 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { conn.rosenpassRemoteKey = rci.rosenpassPubKey conn.currentConnPriority = conntype.Relay + conn.everConnected.Store(true) conn.statusRelay.SetConnected() conn.setRelayedProxy(wgProxy) conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey, updateTime) @@ -605,6 +712,52 @@ func (conn *Conn) handleRelayDisconnectedLocked() { } func (conn *Conn) onGuardEvent() { + // Suppress reconnect-offers under p2p-dynamic when the management + // server reports the remote peer as offline (live_online=false). The + // guard otherwise spams an offer every 5-30 s for up to relay_timeout + // minutes after the remote disappeared, and each offer that survives + // (when the remote reconnects) immediately wakes the lazy manager on + // the remote side -- defeating the user-visible "idle until traffic" + // promise of p2p-dynamic. Eager modes (p2p, relay-forced) keep the + // always-on behaviour because that's what those modes are for. + if conn.config.Mode == connectionmode.ModeP2PDynamic { + if state, err := conn.statusRecorder.GetPeer(conn.config.Key); err == nil { + if state.RemoteServerLivenessKnown && !state.RemoteLiveOnline { + conn.Log.Tracef("guard: skip offer (remote peer offline, p2p-dynamic)") + return + } + } + // Codex hardening audit: also skip when the guard is firing + // for "PartiallyConnected" (relay up, ICE detached) AND the + // detach was due to ICE-inactivity (the dynamic inactivity + // manager called DetachICEForPeer because no payload traffic + // for iceTimeout). 
Re-firing offers in that state wastes + // signal traffic and can wake the remote's lazy manager just + // to re-attach ICE that we'll detach again on the next idle + // cycle. The next REAL outbound packet on this peer will go + // through ConnMgr.ActivatePeer -> conn.AttachICE which DOES + // respect iceBackoff and is the correct path to re-engage ICE. + // + // Detection requires THREE conditions: + // 1. ICE worker exists but is detached (no listener), + // 2. no recorded ICE-failure-backoff (else the existing + // 3-tries-then-hourly retry policy handles it), + // 3. this Conn has been connected at least ONCE before (the + // everConnected flag). Without #3 we'd skip the very + // first bootstrap offer for a brand-new peer because + // its ICE listener is also nil before initial setup — + // regression caught during 6-host hardware test on + // 4998e5a58. + if conn.everConnected.Load() && + conn.handshaker != nil && conn.handshaker.readICEListener() == nil { + if state, err := conn.statusRecorder.GetPeer(conn.config.Key); err == nil { + if !state.IceBackoffSuspended && state.IceBackoffFailures == 0 { + conn.Log.Tracef("guard: skip offer (ICE detached for inactivity, p2p-dynamic; will re-attach on real traffic)") + return + } + } + } + } conn.dumpState.SendOffer() if err := conn.handshaker.SendOffer(); err != nil { conn.Log.Errorf("failed to send offer: %v", err) @@ -613,9 +766,9 @@ func (conn *Conn) onGuardEvent() { func (conn *Conn) onWGDisconnected() { conn.mu.Lock() - defer conn.mu.Unlock() if conn.ctx.Err() != nil { + conn.mu.Unlock() return } @@ -631,6 +784,18 @@ func (conn *Conn) onWGDisconnected() { default: conn.Log.Debugf("No active connection to close on WG timeout") } + + // Capture the callback before releasing the lock; we invoke it in a + // goroutine because it routes back into ConnMgr -> lazyConnMgr -> + // peerStore.PeerConnClose -> Conn.Close, which needs conn.mu (we + // hold it). 
Spawning a goroutine is fine — onWGDisconnected is itself + // fired from the WG-watcher goroutine, no caller waits on the result. + cb := conn.onWGTimeoutRecover + conn.mu.Unlock() + + if cb != nil { + go cb() + } } func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []byte, updateTime time.Time) { @@ -740,7 +905,7 @@ func (conn *Conn) isConnectedOnAllWay() (status guard.ConnStatus) { } return evalConnStatus(connStatusInputs{ - forceRelay: IsForceRelayed(), + forceRelay: conn.config.Mode == connectionmode.ModeRelayForced, peerUsesRelay: conn.workerRelay.IsRelayConnectionSupportedWithPeer(), relayConnected: conn.statusRelay.Get() == worker.StatusConnected, remoteSupportsICE: conn.handshaker.RemoteICESupported(), @@ -975,3 +1140,402 @@ func boolToConnStatus(connected bool) guard.ConnStatus { } return guard.ConnStatusDisconnected } + +// AttachICEOnRelayActivity is the relay-state fast-path triggered by +// ActivityRecorder when transport activity (>32-byte type-4 WG packet) +// is observed for a peer that's currently sitting in Relayed state +// (ICE worker detached on iceTimeout). Encapsulates Codex review-point- +// 4 gating so the engine doesn't have to peek into Conn internals: +// +// 1. mode must be p2p-dynamic (other modes have no detached state) +// 2. conn must be open (not yet closed by relay-timeout) +// 3. currentConnPriority must be Relay (we're using the relay tunnel) +// 4. handshaker.iceListener must be nil (ICE actually detached) +// 5. iceBackoff: by default skipped while suspended, BUT a rate- +// limited override applies (iceBackoff.AllowActivityOverride — +// one bypass per activityOverrideMinInterval=5min per peer). +// Codex review 2026-05-05 point 5: real user activity is the +// strongest "I want this peer back" signal, so a single override +// per 5min trades a bounded extra offer/answer pair for unsticking +// legitimately working peers that hit a transient ICE drop. +// 6. 
everConnected must be true (we had P2P at least once -- avoids +// pointless retries for peers we never reached P2P with) +// +// Returns true when AttachICE was actually called (caller can rate- +// limit further). The lazy-mgr.onPeerActivity path uses +// ResetIceBackoff (unconditional reset) because there the trigger is +// "user wants the peer back after full Idle" — that signal is even +// stronger than relay-state activity, so the stronger reset is OK. +// +// Phase 3.7i (#5989), Codex review 2026-05-05. +func (conn *Conn) AttachICEOnRelayActivity() (attempted bool) { + conn.mu.Lock() + if conn.config.Mode != connectionmode.ModeP2PDynamic { + conn.mu.Unlock() + return false + } + if !conn.opened { + conn.mu.Unlock() + return false + } + if conn.currentConnPriority != conntype.Relay { + conn.mu.Unlock() + return false + } + if conn.handshaker == nil || conn.handshaker.readICEListener() != nil { + conn.mu.Unlock() + return false + } + if conn.iceBackoff != nil && conn.iceBackoff.IsSuspended() { + // Phase 3.7i (#5989), Codex review point 5 follow-up: activity- + // driven override of an active failure backoff. Rate-limited + // inside iceBackoff.AllowActivityOverride to one override per + // 5min per peer, so we never spam the signal server. Without + // this, a transient ICE drop on a flaky link (e.g. LTE NAT + // mapping recovery > 12s while the Guard's 3-fast-retries + // timer fires) leaves the peer permanently relay-only for an + // hour even when the user actively pings. 
+ if conn.iceBackoff.AllowActivityOverride() { + conn.iceBackoff.Reset() + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } + conn.Log.Infof("ICE backoff override on relay-activity (1x per %s rate limit)", "5min") + } else { + conn.mu.Unlock() + return false + } + } + if !conn.everConnected.Load() { + conn.mu.Unlock() + return false + } + // All gates passed; release the lock before calling AttachICE + // because AttachICE re-acquires it. + conn.mu.Unlock() + if err := conn.AttachICE(); err != nil { + conn.Log.Warnf("AttachICE on relay-activity: %v", err) + return false + } + // Phase 3.7i (#5989), Codex review 2026-05-05: also reset the + // guard's per-cycle ICE retry budget so the new pair-check cycle + // is not immediately throttled into hourly mode by 3 stale + // failures. The iceBackoff override above only handles the + // failure-suspension side; the guard runs a parallel 3-tries-then- + // hourly counter that is independent of iceBackoff. + if conn.guard != nil { + conn.guard.NotifyPeerActivity() + } + conn.Log.Debugf("ICE re-attached on relay-activity (relay -> P2P upgrade attempt)") + return true +} + +// NotifyGuardActivity forwards a peer-activity event to the underlying +// guard so it resets its per-cycle ICE retry budget and ticker. Safe +// to call even when the guard hasn't been created yet (returns +// silently). Phase 3.7i (#5989), Codex review 2026-05-05. +func (conn *Conn) NotifyGuardActivity() { + conn.mu.Lock() + g := conn.guard + conn.mu.Unlock() + if g != nil { + g.NotifyPeerActivity() + } +} + +// ResetIceBackoff hard-resets the per-peer ICE-failure backoff state +// (failure counter back to 0, suspended -> false, exponential schedule +// back to its initial interval, lastResetAt stamped). Intended for the +// lazy-mgr activity-trigger path: a transient ICE failure (e.g. 
+// concurrent wake-up race) otherwise enters "3 retries exhausted -> +// hourly retry" mode (guard/ice_retry_state.go:52) and the next +// legitimate activity sees AttachICE early-return on +// iceBackoff.IsSuspended() -> peer permanently stuck on relay. Called +// from lazyconn manager.onPeerActivity before AttachICE so real user +// traffic always gets a fresh ICE attempt. The signal-trigger path +// does NOT reset (it deliberately respects the failure backoff). +func (conn *Conn) ResetIceBackoff() { + conn.mu.Lock() + defer conn.mu.Unlock() + if conn.iceBackoff == nil { + return + } + conn.iceBackoff.Reset() + if conn.statusRecorder != nil { + // Codex review 2026-05-05 follow-up: keep status output (CLI + // `netbird status -d`, daemon RPC) in sync with the cleared + // backoff state so it doesn't continue to advertise a stale + // "suspended" / "Failures=N" snapshot after the reset. + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } +} + +// AttachICE registers the ICE-offer listener on the handshaker after the +// activity-detector observes traffic on the relay tunnel. Idempotent: if +// the listener is already attached, it is a no-op. Triggers a fresh offer +// so the remote side learns we are now ICE-capable. +// +// Used by p2p-dynamic mode: workerICE is created in Open() but the +// handshaker dispatch is deferred until traffic activity is seen. 
+func (conn *Conn) AttachICE() error { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.iceBackoff != nil && conn.iceBackoff.IsSuspended() { + snap := conn.iceBackoff.Snapshot() + conn.Log.Debugf("ICE backoff active (failure #%d, retry at %s), staying on relay", + snap.Failures, + snap.NextRetry.Format("15:04:05")) + return nil + } + if conn.handshaker == nil { + return fmt.Errorf("AttachICE: handshaker not initialized (Open not called)") + } + if conn.workerICE == nil { + return fmt.Errorf("AttachICE: workerICE is nil (relay-forced mode)") + } + + if !conn.attachICEListenerLocked() { + return nil + } + + if err := conn.handshaker.SendOffer(); err != nil { + conn.Log.Warnf("AttachICE: SendOffer failed: %v", err) + } + return nil +} + +// attachICEListenerLocked attaches the ICE listener to the handshaker if it +// is not already attached. Returns true when a new attachment was made, +// false when the call was a no-op (already attached, ICE backoff suspended, +// handshaker not initialised, or workerICE not present). +// +// Caller MUST hold conn.mu. Used by: +// - AttachICE (signal-trigger path), which then issues SendOffer. +// - onNetworkChange (Phase 3.7e, #5989), which deliberately does NOT call +// SendOffer because the Guard reconnect-loop handles that. +// +// Honours iceBackoff.IsSuspended() so the failure-backoff is not bypassed. 
+func (conn *Conn) attachICEListenerLocked() bool { + if conn.iceBackoff != nil && conn.iceBackoff.IsSuspended() { + snap := conn.iceBackoff.Snapshot() + conn.Log.Debugf("ICE backoff active (failure #%d, retry at %s), staying on relay", + snap.Failures, + snap.NextRetry.Format("15:04:05")) + return false + } + if conn.handshaker == nil || conn.workerICE == nil { + return false + } + if conn.handshaker.readICEListener() != nil { + return false + } + + conn.handshaker.AddICEListener(conn.workerICE.OnNewOffer) + conn.Log.Debugf("ICE listener attached (locked path)") + return true +} + +// DetachICE removes the ICE-offer listener and tears down the ICE worker. +// Idempotent: if no listener is attached, it is a no-op. Used by +// p2p-dynamic mode when the inactivity manager fires the iceTimeout but +// the relay tunnel should stay up. +func (conn *Conn) DetachICE() error { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.handshaker == nil { + return nil + } + if conn.handshaker.readICEListener() == nil { + return nil + } + + conn.handshaker.RemoveICEListener() + if conn.workerICE != nil { + conn.workerICE.Close() + } + conn.Log.Debugf("ICE listener detached (p2p-dynamic teardown)") + return nil +} + +// onICEFailed is invoked when pion's ICE agent reports +// ConnectionStateFailed. Increments the backoff counter and tears +// down the ICE worker. Phase 3 of #5989. +// +// Backoff sources are intentionally narrow (Codex review 2026-05-05): +// only Pion's ConnectionStateFailed counts as a "failure" worth +// pushing the exponential schedule forward. Inactivity-driven detach +// (DetachICEForPeer via ICEInactiveChan) and full-conn close (lazy-mgr +// relayTimeout) bypass markFailure entirely. So the backoff exclusively +// measures "ICE pair-checks broke after a real attempt", never +// "no traffic flowed for a while". 
+func (conn *Conn) onICEFailed() { + if conn.iceBackoff == nil { + return + } + // Distinguish failure types in the log so future debugging can + // tell apart "first-attempt couldn't pair" from "established P2P + // silently dropped" from "re-attach after detach failed". The + // classification is best-effort -- pion only tells us "Failed"; + // we infer from local state. + failType := "first-attempt" + switch { + case conn.everConnected.Load(): + failType = "post-success-drop" + case conn.handshaker != nil && conn.handshaker.readICEListener() != nil: + failType = "re-attach" + } + + delay := conn.iceBackoff.markFailure() + snap := conn.iceBackoff.Snapshot() + if delay > 0 { + conn.Log.Infof("ICE failure #%d (%s), suspending for %s, next retry at %s", + snap.Failures, + failType, + delay.Round(time.Second), + snap.NextRetry.Format("15:04:05")) + } + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, snap) + } + // Tear down ICE. Idempotent. Conn stays on relay. + if err := conn.DetachICE(); err != nil { + conn.Log.Warnf("DetachICE after onICEFailed: %v", err) + } +} + +// onICEConnected is invoked when pion's ICE agent reports +// ConnectionStateConnected. Resets the backoff. Phase 3 of #5989. +func (conn *Conn) onICEConnected() { + if conn.iceBackoff == nil { + return + } + if conn.iceBackoff.Snapshot().Failures > 0 { + conn.Log.Infof("ICE success, resetting backoff (was %d failures)", + conn.iceBackoff.Snapshot().Failures) + } + conn.iceBackoff.markSuccess() + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } +} + +// SetIceBackoffMax updates the per-peer backoff cap. Called by ConnMgr +// when the server pushes a new p2p_retry_max_seconds value. If the +// iceBackoff is not yet initialized (Conn not opened yet), the value +// is stored in config so Open() picks it up. Phase 3 of #5989. 
+func (conn *Conn) SetIceBackoffMax(d time.Duration) { + conn.mu.Lock() + defer conn.mu.Unlock() + conn.config.P2pRetryMaxSeconds = uint32(d / time.Second) + if conn.iceBackoff != nil { + conn.iceBackoff.SetMaxBackoff(d) + } +} + +// IceBackoffSnapshot exposes the read-only backoff state for the +// status output (Task E1). Returns zero-value snapshot if no backoff +// is active. Phase 3 of #5989. +func (conn *Conn) IceBackoffSnapshot() BackoffSnapshot { + conn.mu.Lock() + defer conn.mu.Unlock() + if conn.iceBackoff == nil { + return BackoffSnapshot{} + } + return conn.iceBackoff.Snapshot() +} + +// onNetworkChange is invoked by Guard when the signal/relay layer +// reconnects after a network change (LTE-modem replug, WiFi roaming, etc.). +// Phase 3.5 of #5989. +// +// Resets the per-peer ICE-failure backoff (because the NAT topology may +// have changed -- previous failures do not predict future ones) AND +// recreates the workerICE wrapper so the next AttachICE/offer has a +// fresh pion-agent rather than one closed by a previous DetachICE call. +// +// Called from Guard's goroutine; acquires conn.mu, so it must not be +// invoked from a path that already holds conn.mu. +func (conn *Conn) onNetworkChange() { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.ctx.Err() != nil { + return + } + + if conn.iceBackoff != nil { + snap := conn.iceBackoff.Snapshot() + if snap.Failures > 0 { + conn.Log.Infof("network change detected, resetting ICE backoff (was %d failures)", + snap.Failures) + } + conn.iceBackoff.Reset() + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } + } + + // We deliberately do NOT replace the workerICE wrapper here. Replacing + // it leaks underlying socket/iface bindings between the old and new + // instance, which empirically causes ICE to fail with a 13s pair-check + // timeout instead of converging in <1s like a fresh daemon-start does. 
+ // + // We also deliberately do NOT call handshaker.SendOffer() here even + // though that was an earlier attempt. The Guard's reconnect-loop + // already issues sendOffer via its newReconnectTicker (800ms initial, + // up to ~4 retries in the first ~6s) right after the same srReconnect + // event that fires this callback. Adding our own SendOffer just creates + // a sending-offer storm: 5 offers per peer in 6 seconds, which on the + // remote side triggers repeated tear-down + reCreateAgent cycles in + // quick succession (each new sessionID forces it). That prevents ICE + // from ever completing its pair-checks. + // + // All we do here: close the current pion agent (sets w.agent = nil). + // The Guard's natural reconnect-loop then drives the next sendOffer, + // the remote responds with a fresh offer, and our existing OnNewOffer + // path (still attached to the unchanged workerICE wrapper) goes + // through the well-tested "agent==nil + new offer -> reCreateAgent" + // branch in worker_ice.go. + // + // Phase 3.7g (#5989): only tear down the workerICE agent when ICE is + // actually broken. If pion's lastKnownState is still Connected the + // peer-to-peer UDP path is alive end-to-end (typical for a brief + // signal-server outage where WG keepalives between peers continued + // to flow); closing the agent here would force a 15-25 s ICE + // renegotiation cycle plus a Relay→ICE handover gap that the user + // would observe as a ping dropout for no good reason. + // + // If ICE actually went Disconnected/Failed during the network event, + // pion has already cleared w.agent via onConnectionStateChange and + // the Close call below is a no-op anyway. Either way, a fresh remote + // OFFER will recreate the agent through the existing OnNewOffer path. + // + // In ModeRelayForced workerICE is nil; nothing to close. 
+ if conn.workerICE != nil && !conn.workerICE.IsConnected() { + conn.workerICE.Close() + } else if conn.workerICE != nil { + conn.Log.Debugf("network change: skipping workerICE.Close (ICE still Connected, soft-fallback)") + } + + // Phase 3.7e (#5989): force the ICE listener back on after a network + // change. Empirically, after an LTE-modem replug the iceListener can + // end up detached for some peers (paths via onICEFailed → DetachICE + // after a Failed transition that we did not log because of timing, + // or via concurrent state changes during the bounce). Re-attaching + // on every signal in ConnMgr.ActivatePeer (Phase 3.7d) is necessary + // but not sufficient: by the time the next signal arrives, several + // remote OFFERs and the Guard's first sendOffer may already have + // been silently dropped at handshaker.Listen() because no listener + // was present. Re-attaching here closes that window deterministically. + // + // We do NOT call SendOffer from this path. The Guard's natural + // reconnect-ticker (newReconnectTicker, 800 ms initial) issues the + // next offer right after the same srReconnect event that drove this + // callback; sending an extra one creates the offer-storm that + // Phase 3.7b removed. + conn.attachICEListenerLocked() + + conn.Log.Debugf("ICE state reset on network change (agent closed; listener re-armed; Guard will resend offer)") +} diff --git a/client/internal/peer/conn_handover_order_test.go b/client/internal/peer/conn_handover_order_test.go new file mode 100644 index 00000000000..b5a3b6d50ec --- /dev/null +++ b/client/internal/peer/conn_handover_order_test.go @@ -0,0 +1,176 @@ +package peer + +import ( + "os" + "strings" + "testing" +) + +// Codex hardening regression: the Relay->ICE/P2P handover in +// conn.go's onICEConnected must call methods in this exact order to +// avoid a window where Relay is paused but WG still points at it +// (1-2 s of dropped packets): +// +// 1. wgProxy.Work() (new ICE proxy ready) +// 2. 
endpointUpdater.ConfigureWGEndpoint(...) (WG points at new EP) +// 3. wgProxyRelay.RedirectAs(ep) (drain in-flight relay) +// 4. wgProxyRelay.Pause() (stop relay last) +// +// The test below is a static-text check: it reads conn.go and asserts +// the FIRST occurrence of each landmark in onICEConnected appears in +// the expected order. A heavier behavioural test would need fake +// wgProxy/endpointUpdater plumbing; this version catches accidental +// reorders cheaply and points at the exact line numbers if the +// invariant is broken. +func TestConn_HandoverOrder_OnICEConnected(t *testing.T) { + src, err := os.ReadFile("conn.go") + if err != nil { + t.Fatalf("read conn.go: %v", err) + } + body := extractFunctionBody(t, string(src), "onICEConnectionIsReady") + + // Landmarks in expected order. Each entry is a substring; the test + // records the first index where it appears in the function body + // and asserts the indices increase monotonically. + landmarks := []string{ + "wgProxy.Work()", + "endpointUpdater.ConfigureWGEndpoint(", + "wgProxyRelay.RedirectAs(", + "wgProxyRelay.Pause()", + } + prev := -1 + for _, lm := range landmarks { + idx := strings.Index(body, lm) + if idx < 0 { + t.Errorf("landmark %q missing from onICEConnected — was the handover sequence refactored?", lm) + continue + } + if idx <= prev { + t.Errorf("landmark %q appears at index %d, must come AFTER previous landmark at %d", lm, idx, prev) + } + prev = idx + } +} + +// Codex follow-up regression: onGuardEvent's "skip offer for ICE +// detached due to inactivity" branch must be gated on everConnected +// being true. Without that gate, the guard skips the FIRST bootstrap +// offer for a brand-new peer (its ICE listener is also nil before +// initial setup), and the peer gets stuck in Connecting forever. +// This was caught during the 6-host hardware test on 4998e5a58 — +// dk20 saw 3 BM routers stuck in Connecting after ping wakeup +// because the bootstrap offer was being suppressed. 
+func TestConn_OnGuardEvent_SkipOfferGatedOnEverConnected(t *testing.T) { + src, err := os.ReadFile("conn.go") + if err != nil { + t.Fatalf("read conn.go: %v", err) + } + body := extractFunctionBody(t, string(src), "onGuardEvent") + // The skip-offer branch must reference everConnected.Load() in its + // guard. If a future refactor splits the conditions, the landmark + // "everConnected.Load()" should still appear ABOVE the + // "skip offer (ICE detached for inactivity" trace log to gate it. + const everCheck = "everConnected.Load()" + const skipTrace = "skip offer (ICE detached for inactivity" + idxEver := strings.Index(body, everCheck) + idxSkip := strings.Index(body, skipTrace) + if idxEver < 0 { + t.Fatalf("onGuardEvent missing everConnected.Load() guard — bootstrap offers will be suppressed for brand-new peers") + } + if idxSkip < 0 { + t.Fatalf("skip-offer trace landmark missing from onGuardEvent") + } + if idxEver > idxSkip { + t.Errorf("everConnected.Load() must appear BEFORE the skip-offer branch (got %d > %d)", idxEver, idxSkip) + } +} + +// Codex follow-up regression: onWGDisconnected MUST invoke +// onWGTimeoutRecover after closing the active worker — without it, +// the peer is stuck in "Connecting" forever because lazy mgr keeps +// it in active set with no activity listener (caught during the +// 6-host hardware test on c9a47ed90: dk20 saw 572a2/5731A frozen +// in Connecting after WG handshake timeout, 0/10 ping responses, +// no log activity for ~10min). 
+func TestConn_OnWGDisconnected_InvokesRecoverCallback(t *testing.T) { + src, err := os.ReadFile("conn.go") + if err != nil { + t.Fatalf("read conn.go: %v", err) + } + body := extractFunctionBody(t, string(src), "onWGDisconnected") + const cbField = "onWGTimeoutRecover" + if !strings.Contains(body, cbField) { + t.Fatalf("onWGDisconnected missing reference to %q — WG-timeout recovery is broken", cbField) + } + // The callback must be invoked AFTER the conn close switch (otherwise + // lazy mgr would be re-armed before the active workers are torn down). + idxClose := strings.Index(body, "workerRelay.CloseConn()") + idxCb := strings.Index(body, cbField) + if idxClose < 0 { + t.Fatalf("workerRelay.CloseConn() landmark missing") + } + if idxCb < idxClose { + t.Errorf("recover callback (idx %d) must come AFTER worker close (idx %d)", idxCb, idxClose) + } +} + +// Codex hardening regression: onICEStateDisconnected must NOT call +// RemoveEndpointAddress in the no-Relay-fallback branch. A stale +// endpoint is less disruptive than a guaranteed no-endpoint gap; the +// next successful path update replaces it. +func TestConn_HandoverOrder_OnICEDisconnected_NoRemoveEndpointAddress(t *testing.T) { + src, err := os.ReadFile("conn.go") + if err != nil { + t.Fatalf("read conn.go: %v", err) + } + body := extractFunctionBody(t, string(src), "onICEStateDisconnected") + // The whole function body must NOT contain a call to + // RemoveEndpointAddress. (Used to be there in the no-fallback + // branch; removed in Codex#8b 2026-05-03.) + if strings.Contains(body, "RemoveEndpointAddress(") { + t.Error("onICEStateDisconnected must NOT call RemoveEndpointAddress — it creates a no-endpoint gap during ICE flaps; see conn.go comment for rationale") + } +} + +// extractFunctionBody returns the source text between `func name(` and +// the closing brace at column 0 that follows. Crude but sufficient for +// these landmark checks. 
+func extractFunctionBody(t *testing.T, src, name string) string { + t.Helper() + marker := "func (conn *Conn) " + name + "(" + start := strings.Index(src, marker) + if start < 0 { + // Plain func form (no receiver) fallback. + marker = "func " + name + "(" + start = strings.Index(src, marker) + } + if start < 0 { + t.Fatalf("function %q not found in source", name) + } + // Find the closing brace at column 0 starting at the next newline + // after the opening line. Functions in NetBird's style are always + // indented with leading-tab and the closing } is at column 0. + rest := src[start:] + lines := strings.Split(rest, "\n") + var body strings.Builder + depth := 0 + openSeen := false + for _, line := range lines { + body.WriteString(line) + body.WriteByte('\n') + for _, ch := range line { + switch ch { + case '{': + depth++ + openSeen = true + case '}': + depth-- + if openSeen && depth == 0 { + return body.String() + } + } + } + } + t.Fatalf("function %q has unbalanced braces", name) + return "" +} diff --git a/client/internal/peer/conn_lazy_keepwgpeer_test.go b/client/internal/peer/conn_lazy_keepwgpeer_test.go new file mode 100644 index 00000000000..41e538bbf24 --- /dev/null +++ b/client/internal/peer/conn_lazy_keepwgpeer_test.go @@ -0,0 +1,132 @@ +package peer + +import ( + "os" + "strings" + "testing" +) + +// Regression test for the lazy-mode routed-subnet wake-up bug. +// +// Background: when a NetBird peer is also a routing peer (advertises +// subnets like 192.168.91.0/24 via NetBird Networks), the route-manager +// appends those subnets as AllowedIPs to the WG peer entry. When the +// lazy-connection-manager deactivates the peer (relay-inactivity +// timeout, WG-handshake-timeout-recover), the current code path calls +// peer.Conn.Close() which unconditionally calls +// endpointUpdater.RemoveWgPeer() -- which removes the ENTIRE WG peer +// entry, including the routed-subnet AllowedIPs. 
+// +// The lazy-listener then re-arms the peer with ONLY the basic peer-IP +// /32 AllowedIPs. The route-manager's allowedIPsRefCounter is unaware +// of the round-trip and does not re-apply the routed subnets. Until the +// next traffic to the peer's NetBird IP wakes the peer (and the +// route-manager's reconcile re-runs), routed-subnet traffic to those +// prefixes is silently dropped by WG. +// +// The fix introduces a `keepWgPeer bool` parameter on Close(). Lazy- +// suspend callers pass true (don't remove the WG peer entry, just +// suspend the data path); permanent-removal callers pass false +// (preserve the original behaviour for engine.removePeer / mode-change +// tear-down). +// +// This test is a static-text check: it asserts the Conn.Close signature +// exposes the keepWgPeer parameter AND that RemoveWgPeer is gated on +// it. A heavier behavioural test would need a stub WgInterface plus +// route-manager plumbing; this version catches accidental signature +// reverts cheaply and points at the exact landmark if the invariant is +// broken. +// +// Tracked in docs/bugs/2026-05-04-lazy-wake-on-routed-subnet.md. +func TestConn_Close_KeepWgPeerParameterPresent(t *testing.T) { + src, err := os.ReadFile("conn.go") + if err != nil { + t.Fatalf("read conn.go: %v", err) + } + body := string(src) + + // Signature landmark: Close must accept the keepWgPeer argument. + const sig = "func (conn *Conn) Close(signalToRemote bool, keepWgPeer bool)" + if !strings.Contains(body, sig) { + t.Errorf("Conn.Close signature missing keepWgPeer parameter — the lazy-suspend path will remove the WG peer and drop routed-subnet AllowedIPs (see docs/bugs/2026-05-04-lazy-wake-on-routed-subnet.md). Expected: %q", sig) + } + + // Gate landmark: RemoveWgPeer must be guarded by !keepWgPeer. 
+ closeBody := extractFunctionBody(t, body, "Close") + const guarded = "if !keepWgPeer" + if !strings.Contains(closeBody, guarded) { + t.Errorf("Conn.Close body missing %q guard around endpointUpdater.RemoveWgPeer — without it, routed-subnet AllowedIPs are dropped on every lazy-suspend cycle", guarded) + } + const removeCall = "endpointUpdater.RemoveWgPeer()" + if !strings.Contains(closeBody, removeCall) { + t.Errorf("Conn.Close body missing %q call — the permanent-removal path still needs to remove the WG peer (only the lazy-suspend path keeps it)", removeCall) + } + + // The guard must appear BEFORE the call, otherwise RemoveWgPeer + // would always run. + guardIdx := strings.Index(closeBody, guarded) + callIdx := strings.Index(closeBody, removeCall) + if guardIdx < 0 || callIdx < 0 { + return // already reported above + } + if guardIdx > callIdx { + t.Errorf("guard %q (idx %d) must come BEFORE %q (idx %d)", guarded, guardIdx, removeCall, callIdx) + } +} + +// All call sites of Conn.Close in conn_mgr.go must pass an explicit +// keepWgPeer value chosen for the call's intent. The four documented +// call sites: +// +// * RelayInactiveChan handler -> keepWgPeer=true (lazy suspend) +// * RecoverPeerToIdle (WG-timeout) -> keepWgPeer=true (lazy suspend, Phase 3.7i) +// * RemovePeerConn -> keepWgPeer=false (permanent removal) +// * mode-change tear-down (resetPeers) -> keepWgPeer=false (full reopen) +// +// This test asserts no zero-arg or 1-arg Close call survives in +// conn_mgr.go after the fix. +func TestConnMgr_AllCloseCallersPassKeepWgPeer(t *testing.T) { + src, err := os.ReadFile("../conn_mgr.go") + if err != nil { + t.Fatalf("read conn_mgr.go: %v", err) + } + body := string(src) + + // Crude but effective: scan every line for ".Close(" on a peer.Conn + // receiver and confirm it has TWO arguments separated by a comma. + // Lines that match the legacy 1-arg form (e.g. ".Close(false)") are + // flagged. 
+ lines := strings.Split(body, "\n") + for i, line := range lines { + if !strings.Contains(line, ".Close(") { + continue + } + // Filter to only peer-Conn-style closes (skip Logger.Close, etc). + // The peer-Conn form is ".Close(...)" where varname is + // usually "conn" / "peerConn" — exclude obvious non-peer Closes. + trim := strings.TrimSpace(line) + switch { + case strings.Contains(trim, "lazyConnMgr.Close"), + strings.Contains(trim, "activityManager.Close"), + strings.Contains(trim, "im.Close"), + strings.Contains(trim, "peerStore.Close"), + strings.Contains(trim, "// "), + !(strings.Contains(trim, "conn.Close(") || strings.Contains(trim, "peerConn.Close(")): + continue + } + // Now check the arg count. + open := strings.Index(line, ".Close(") + if open < 0 { + continue + } + args := line[open+len(".Close("):] + closeIdx := strings.Index(args, ")") + if closeIdx < 0 { + continue // multi-line call, give up + } + argList := strings.TrimSpace(args[:closeIdx]) + if !strings.Contains(argList, ",") { + t.Errorf("conn_mgr.go:%d: peer.Conn.Close call missing keepWgPeer second argument: %q", i+1, trim) + } + } +} diff --git a/client/internal/peer/conn_test.go b/client/internal/peer/conn_test.go index 59216b647e9..58b8432bdd2 100644 --- a/client/internal/peer/conn_test.go +++ b/client/internal/peer/conn_test.go @@ -4,9 +4,11 @@ import ( "context" "fmt" "os" + "strings" "testing" "time" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/netbirdio/netbird/client/iface" @@ -281,6 +283,137 @@ func TestConn_presharedKey(t *testing.T) { } } +// TestConn_AttachICE_NilHandshaker verifies AttachICE errors when called +// before Open() has wired up the handshaker. 
+func TestConn_AttachICE_NilHandshaker(t *testing.T) { + c := &Conn{Log: log.WithField("peer", "test")} + if err := c.AttachICE(); err == nil { + t.Fatal("AttachICE on Conn with nil handshaker should return error") + } +} + +// TestConn_AttachICE_NilWorkerICE verifies AttachICE errors when the conn +// is in relay-forced mode (workerICE was never created). +func TestConn_AttachICE_NilWorkerICE(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + } + if err := c.AttachICE(); err == nil { + t.Fatal("AttachICE with nil workerICE should return error (relay-forced mode)") + } +} + +// TestConn_DetachICE_NoHandshaker is a no-op idempotency check: calling +// DetachICE before Open() must not panic and must not error. +func TestConn_DetachICE_NoHandshaker(t *testing.T) { + c := &Conn{Log: log.WithField("peer", "test")} + if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE with nil handshaker should be no-op, got error: %v", err) + } +} + +// TestConn_DetachICE_ClearsListener verifies DetachICE removes the ICE +// listener from the handshaker. workerICE is left nil so Close() is skipped. +func TestConn_DetachICE_ClearsListener(t *testing.T) { + h := &Handshaker{} + h.AddICEListener(func(o *OfferAnswer) {}) + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: h, + } + + if h.readICEListener() == nil { + t.Fatal("precondition: handshaker should have a listener") + } + + if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE returned error: %v", err) + } + + if h.readICEListener() != nil { + t.Fatal("DetachICE should clear the ICE listener") + } + + // Idempotent: second call is a no-op. 
+ if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE second call should be no-op, got: %v", err) + } +} + +func TestConn_AttachICE_NoOpWhenSuspended(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() // suspend it + + // AttachICE should return nil but not actually attach + err := c.AttachICE() + if err != nil { + t.Fatalf("expected nil error during backoff, got %v", err) + } + if c.handshaker.readICEListener() != nil { + t.Fatal("AttachICE during backoff must NOT register a listener") + } +} + +func TestConn_AttachICE_AfterBackoffExpiry(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() + // Force nextRetry into the past + c.iceBackoff.mu.Lock() + c.iceBackoff.nextRetry = time.Now().Add(-1 * time.Second) + c.iceBackoff.mu.Unlock() + + // Without workerICE, AttachICE returns the "nil workerICE" error + // -- but we only care that the backoff gate is NOT engaged anymore. 
+ err := c.AttachICE() + if err == nil { + t.Fatal("expected the relay-forced error path (nil workerICE)") + } + // The error should be about workerICE, not "suspended": + if errMsg := err.Error(); !strings.Contains(errMsg, "workerICE") { + t.Fatalf("after backoff expiry, error should be about workerICE not suspend; got %q", errMsg) + } +} + +func TestConn_OnICEFailed_MarksBackoffFailure(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + iceBackoff: newIceBackoff(15 * time.Minute), + } + if c.iceBackoff.IsSuspended() { + t.Fatal("precondition: not suspended") + } + c.onICEFailed() + if !c.iceBackoff.IsSuspended() { + t.Fatal("after onICEFailed, must be suspended") + } + if c.iceBackoff.Snapshot().Failures != 1 { + t.Fatalf("failures must be 1, got %d", c.iceBackoff.Snapshot().Failures) + } +} + +func TestConn_OnICEConnected_ResetsBackoff(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() + c.iceBackoff.markFailure() + c.onICEConnected() + snap := c.iceBackoff.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after onICEConnected: %+v", snap) + } +} + func TestConn_presharedKey_RosenpassManaged(t *testing.T) { conn := Conn{ config: ConnConfig{ diff --git a/client/internal/peer/env.go b/client/internal/peer/env.go index ed6a3af5391..fbee8f6808b 100644 --- a/client/internal/peer/env.go +++ b/client/internal/peer/env.go @@ -3,14 +3,32 @@ package peer import ( "os" "runtime" + "strconv" "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/shared/connectionmode" ) const ( + EnvKeyNBConnectionMode = "NB_CONNECTION_MODE" EnvKeyNBForceRelay = "NB_FORCE_RELAY" EnvKeyNBHomeRelayServers = "NB_HOME_RELAY_SERVERS" + + envEnableLazyConn = "NB_ENABLE_EXPERIMENTAL_LAZY_CONN" + envInactivityThreshold = "NB_LAZY_CONN_INACTIVITY_THRESHOLD" ) +var deprecationOnce sync.Map // env-var name -> *sync.Once + +// 
IsForceRelayed reports whether legacy NB_FORCE_RELAY is set, plus the +// runtime-special-case js (always relayed because of browser limitations). +// +// Deprecated: prefer ResolveModeFromEnv. Kept for callers that haven't +// migrated yet (Phase 1 backwards compat). func IsForceRelayed() bool { if runtime.GOOS == "js" { return true @@ -18,6 +36,65 @@ func IsForceRelayed() bool { return strings.EqualFold(os.Getenv(EnvKeyNBForceRelay), "true") } +// ResolveModeFromEnv reads all three legacy env vars plus the new +// NB_CONNECTION_MODE, applies the documented precedence and returns +// the resolved Mode and relay-timeout (in seconds, 0 if unset). +// +// Precedence: +// 1. NB_CONNECTION_MODE if parseable -> wins +// 2. NB_FORCE_RELAY=true -> ModeRelayForced (most-restrictive) +// 3. NB_ENABLE_EXPERIMENTAL_LAZY_CONN=true -> ModeP2PLazy +// 4. otherwise -> ModeUnspecified (caller falls through) +// +// NB_LAZY_CONN_INACTIVITY_THRESHOLD is parsed independently as the +// relay-timeout (alias) and emits a deprecation-warning if used. 
+func ResolveModeFromEnv() (connectionmode.Mode, uint32) { + mode := connectionmode.ModeUnspecified + + if raw := os.Getenv(EnvKeyNBConnectionMode); raw != "" { + parsed, err := connectionmode.ParseString(raw) + if err != nil { + log.Warnf("ignoring %s=%q: %v", EnvKeyNBConnectionMode, raw, err) + } else if parsed != connectionmode.ModeUnspecified { + mode = parsed + } + } + + if mode == connectionmode.ModeUnspecified { + if strings.EqualFold(os.Getenv(EnvKeyNBForceRelay), "true") { + warnDeprecated(EnvKeyNBForceRelay, EnvKeyNBConnectionMode+"=relay-forced") + mode = connectionmode.ModeRelayForced + } else if isLazyEnvTrue() { + warnDeprecated(envEnableLazyConn, EnvKeyNBConnectionMode+"=p2p-lazy") + mode = connectionmode.ModeP2PLazy + } + } + + timeoutSecs := uint32(0) + if raw := os.Getenv(envInactivityThreshold); raw != "" { + if d, err := time.ParseDuration(raw); err == nil { + timeoutSecs = uint32(d.Seconds()) + warnDeprecated(envInactivityThreshold, "the relay_timeout setting on the management server") + } else { + log.Warnf("ignoring %s=%q: %v", envInactivityThreshold, raw, err) + } + } + + return mode, timeoutSecs +} + +func isLazyEnvTrue() bool { + v, err := strconv.ParseBool(os.Getenv(envEnableLazyConn)) + return err == nil && v +} + +func warnDeprecated(envName, replacement string) { + once, _ := deprecationOnce.LoadOrStore(envName, &sync.Once{}) + once.(*sync.Once).Do(func() { + log.Warnf("env var %s is deprecated; use %s instead. The legacy var still works in this release but may be removed in a future major version.", envName, replacement) + }) +} + // OverrideRelayURLs returns the relay server URL list set in // NB_HOME_RELAY_SERVERS (comma-separated) and a boolean indicating whether // the override is active. 
When the env var is unset, the boolean is false diff --git a/client/internal/peer/env_test.go b/client/internal/peer/env_test.go new file mode 100644 index 00000000000..b70939243c6 --- /dev/null +++ b/client/internal/peer/env_test.go @@ -0,0 +1,58 @@ +package peer + +import ( + "testing" + + "github.com/netbirdio/netbird/shared/connectionmode" +) + +func TestResolveModeFromEnv(t *testing.T) { + cases := []struct { + name string + envConnMode string + envForceRelay string + envEnableLazy string + envInactivity string + wantMode connectionmode.Mode + wantTimeoutSecs uint32 + }{ + {"all unset", "", "", "", "", connectionmode.ModeUnspecified, 0}, + {"connection_mode wins", "p2p-dynamic", "true", "true", "10s", connectionmode.ModeP2PDynamic, 10}, + {"force_relay alone", "", "true", "", "", connectionmode.ModeRelayForced, 0}, + {"lazy alone", "", "", "true", "", connectionmode.ModeP2PLazy, 0}, + {"force_relay AND lazy: force_relay wins", "", "true", "true", "", connectionmode.ModeRelayForced, 0}, + {"only inactivity threshold", "", "", "", "30m", connectionmode.ModeUnspecified, 1800}, + {"connection_mode unparsable falls through to legacy", "garbage", "true", "", "", connectionmode.ModeRelayForced, 0}, + {"connection_mode parses p2p-lazy", "p2p-lazy", "", "", "", connectionmode.ModeP2PLazy, 0}, + {"force-relay value is true (case-insensitive)", "", "TRUE", "", "", connectionmode.ModeRelayForced, 0}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + t.Setenv(EnvKeyNBConnectionMode, c.envConnMode) + t.Setenv(EnvKeyNBForceRelay, c.envForceRelay) + t.Setenv("NB_ENABLE_EXPERIMENTAL_LAZY_CONN", c.envEnableLazy) + t.Setenv("NB_LAZY_CONN_INACTIVITY_THRESHOLD", c.envInactivity) + + gotMode, gotTimeout := ResolveModeFromEnv() + if gotMode != c.wantMode { + t.Errorf("mode = %v, want %v", gotMode, c.wantMode) + } + if gotTimeout != c.wantTimeoutSecs { + t.Errorf("timeout = %v, want %v", gotTimeout, c.wantTimeoutSecs) + } + }) + } +} + +func 
TestIsForceRelayedBackwardsCompat(t *testing.T) { + // IsForceRelayed must remain functional for existing callers + // during the migration window (env.go still exposes it). + t.Setenv(EnvKeyNBForceRelay, "true") + if !IsForceRelayed() { + t.Error("IsForceRelayed() should return true when NB_FORCE_RELAY=true") + } + t.Setenv(EnvKeyNBForceRelay, "false") + if IsForceRelayed() { + t.Error("IsForceRelayed() should return false when NB_FORCE_RELAY=false") + } +} diff --git a/client/internal/peer/guard/guard.go b/client/internal/peer/guard/guard.go index 2e5efbcc5a3..4202f6c5384 100644 --- a/client/internal/peer/guard/guard.go +++ b/client/internal/peer/guard/guard.go @@ -37,6 +37,19 @@ type Guard struct { srWatcher *SRWatcher relayedConnDisconnected chan struct{} iCEConnDisconnected chan struct{} + // peerActivity is signalled by NotifyPeerActivity. Phase 3.7i + // (#5989), Codex review 2026-05-05: real user/transport activity + // must reset the per-cycle ICE retry budget so that a fresh ICE + // pair-check cycle runs instead of skipping straight into the + // hourly retry mode after 3 quick failures. Without this an Idle + // -> Wake to a peer with non-LAN candidates (srflx/relay-only) + // stays on Relay for up to an hour after the user explicitly + // pings, defeating p2p-dynamic's "fast P2P recovery" promise. + peerActivity chan struct{} + // onNetworkChange is called when signal/relay reconnects after a + // network change (e.g. LTE-modem replug, WiFi roaming). Set once + // before Start() is called; no lock needed. Phase 3.5 of #5989. 
+ onNetworkChange func() } func NewGuard(log *log.Entry, isConnectedFn connStatusFunc, timeout time.Duration, srWatcher *SRWatcher) *Guard { @@ -47,9 +60,17 @@ func NewGuard(log *log.Entry, isConnectedFn connStatusFunc, timeout time.Duratio srWatcher: srWatcher, relayedConnDisconnected: make(chan struct{}, 1), iCEConnDisconnected: make(chan struct{}, 1), + peerActivity: make(chan struct{}, 1), } } +// SetOnNetworkChange registers a callback that fires whenever the +// signal/relay layer reconnects after a network change. Must be called +// before Start(). Phase 3.5 of #5989. +func (g *Guard) SetOnNetworkChange(cb func()) { + g.onNetworkChange = cb +} + func (g *Guard) Start(ctx context.Context, eventCallback func()) { g.log.Infof("starting guard for reconnection with MaxInterval: %s", g.timeout) g.reconnectLoopWithRetry(ctx, eventCallback) @@ -69,6 +90,25 @@ func (g *Guard) SetICEConnDisconnected() { } } +// NotifyPeerActivity signals that real user or transport activity for +// this peer has been observed. The reconnect loop resets its per-cycle +// ICE retry budget and (if currently in hourly mode) leaves hourly +// mode, so the next tick re-runs an ICE pair-check at the normal short +// cadence. Non-blocking: a buffered channel coalesces bursts. +// +// Callers: Conn.AttachICEOnRelayActivity (B->A relay-state activity) +// and lazyconn manager.onPeerActivity (C->A Idle wake). Phase 3.7i +// (#5989), Codex review 2026-05-05. +func (g *Guard) NotifyPeerActivity() { + if g == nil { + return + } + select { + case g.peerActivity <- struct{}{}: + default: + } +} + // reconnectLoopWithRetry periodically checks the connection status and sends offers to re-establish connectivity. 
// // Behavior depends on the connection state reported by isConnectedOnAllWay: @@ -124,12 +164,23 @@ func (g *Guard) reconnectLoopWithRetry(ctx context.Context, callback func()) { tickerChannel = ticker.C iceState.reset() + case <-g.peerActivity: + g.log.Debugf("peer activity, reset ICE retry budget and reconnection ticker") + ticker.Stop() + ticker = g.newReconnectTicker(ctx) + tickerChannel = ticker.C + iceState.reset() + case <-srReconnectedChan: g.log.Debugf("has network changes, reset reconnection ticker") ticker.Stop() ticker = g.newReconnectTicker(ctx) tickerChannel = ticker.C iceState.reset() + // Phase 3.5 (#5989): notify Conn to reset iceBackoff + recreate workerICE + if g.onNetworkChange != nil { + g.onNetworkChange() + } case <-ctx.Done(): g.log.Debugf("context is done, stop reconnect loop") diff --git a/client/internal/peer/guard/guard_test.go b/client/internal/peer/guard/guard_test.go new file mode 100644 index 00000000000..ad848b0f349 --- /dev/null +++ b/client/internal/peer/guard/guard_test.go @@ -0,0 +1,96 @@ +package guard + +import ( + "context" + "testing" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/peer/ice" +) + +// newTestGuard returns a Guard with a stubbed connStatusFunc and an +// SRWatcher whose listener channel is unused for the activity-event +// tests. The reconnectLoopWithRetry depends on srWatcher.NewListener() +// which we satisfy with a no-op SRWatcher (no signal/relay subsystems +// running). +func newTestGuard(t *testing.T, status connStatusFunc) (*Guard, *SRWatcher) { + t.Helper() + sr := NewSRWatcher(nil, nil, nil, ice.Config{}) + g := NewGuard(log.NewEntry(log.StandardLogger()), status, 30*time.Second, sr) + return g, sr +} + +// TestGuard_NotifyPeerActivity_NonBlockingCoalesce ensures NotifyPeerActivity +// drops bursts onto a buffered channel without blocking, matching the +// SetICEConnDisconnected pattern. 
Required so high-rate ActivityRecorder +// callbacks never stall the engine path. +func TestGuard_NotifyPeerActivity_NonBlockingCoalesce(t *testing.T) { + g, _ := newTestGuard(t, func() ConnStatus { return ConnStatusConnected }) + + done := make(chan struct{}) + go func() { + for i := 0; i < 1000; i++ { + g.NotifyPeerActivity() + } + close(done) + }() + select { + case <-done: + case <-time.After(time.Second): + t.Fatalf("NotifyPeerActivity blocked for >1s on a buffered channel") + } +} + +// TestGuard_NotifyPeerActivity_NilSafe documents the safety contract +// for callers (Conn / lazy-mgr) that may invoke this against a nil +// guard during a partially-initialised conn lifecycle. +func TestGuard_NotifyPeerActivity_NilSafe(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Fatalf("NotifyPeerActivity on nil guard panicked: %v", r) + } + }() + var g *Guard + g.NotifyPeerActivity() +} + +// TestGuard_PeerActivityResetsHourlyMode is the headline regression +// pin: a Guard whose iceRetryState is in hourly mode must, on a +// peerActivity event, restart the reconnect ticker and clear the +// hourly state, so the next tick runs the normal 3-budget cycle +// again. We exercise the channel handler indirectly through a custom +// loop that mirrors reconnectLoopWithRetry's relevant case. 
+func TestGuard_PeerActivityResetsHourlyMode(t *testing.T) { + g, _ := newTestGuard(t, func() ConnStatus { return ConnStatusPartiallyConnected }) + + iceState := &iceRetryState{log: g.log} + for i := 0; i < maxICERetries+1; i++ { + _ = iceState.shouldRetry() + } + iceState.enterHourlyMode() + if iceState.hourly == nil { + t.Fatalf("precondition: expected hourly mode armed") + } + + g.NotifyPeerActivity() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + select { + case <-g.peerActivity: + // simulate the reconnectLoop case body + iceState.reset() + case <-ctx.Done(): + t.Fatalf("peerActivity event was not delivered within 1s") + } + + if iceState.hourly != nil { + t.Fatalf("hourly ticker should be cleared after activity-driven reset") + } + if iceState.retries != 0 { + t.Fatalf("retries=%d after activity-driven reset, want 0", iceState.retries) + } +} diff --git a/client/internal/peer/guard/ice_retry_state_test.go b/client/internal/peer/guard/ice_retry_state_test.go index 6a5b5a76fc6..94c9030b56c 100644 --- a/client/internal/peer/guard/ice_retry_state_test.go +++ b/client/internal/peer/guard/ice_retry_state_test.go @@ -101,3 +101,35 @@ func TestICERetryState_ResetIsIdempotent(t *testing.T) { t.Fatalf("hourlyC non-nil after double reset") } } + +// TestICERetryState_ResetClearsHourlyAndBudget covers the Phase 3.7i +// scenario (Codex review 2026-05-05): a peer is in hourly mode after +// 3 cold srflx pair-check failures; an activity-driven reset must +// both clear the hourly ticker AND restore the full budget so the +// next pair-check cycle gets 3 fresh attempts at the short cadence +// before re-entering hourly. Without this property, a peer stays on +// relay for up to an hour after the user explicitly pings. 
+func TestICERetryState_ResetClearsHourlyAndBudget(t *testing.T) { + s := newTestRetryState() + for i := 0; i < maxICERetries+1; i++ { + _ = s.shouldRetry() + } + s.enterHourlyMode() + if s.hourlyC() == nil { + t.Fatalf("precondition: expected hourly mode armed") + } + + s.reset() + + if s.hourly != nil { + t.Fatalf("after activity-reset: hourly ticker must be cleared") + } + if s.retries != 0 { + t.Fatalf("after activity-reset: retries=%d, want 0", s.retries) + } + for i := 1; i <= maxICERetries; i++ { + if !s.shouldRetry() { + t.Fatalf("attempt %d after activity-reset returned false; full budget must be restored", i) + } + } +} diff --git a/client/internal/peer/handshaker.go b/client/internal/peer/handshaker.go index 1d44096b640..b4c787e9fce 100644 --- a/client/internal/peer/handshaker.go +++ b/client/internal/peer/handshaker.go @@ -104,9 +104,30 @@ func (h *Handshaker) AddRelayListener(offer func(remoteOfferAnswer *OfferAnswer) } func (h *Handshaker) AddICEListener(offer func(remoteOfferAnswer *OfferAnswer)) { + h.mu.Lock() + defer h.mu.Unlock() h.iceListener = offer } +// RemoveICEListener clears the ICE-offer listener so subsequent remote +// offers no longer dispatch to workerICE. Idempotent; calling it when +// no listener was set is a no-op. Used by Conn.DetachICE in p2p-dynamic +// mode to deactivate ICE without tearing down the relay path. +func (h *Handshaker) RemoveICEListener() { + h.mu.Lock() + defer h.mu.Unlock() + h.iceListener = nil +} + +// readICEListener returns the current ICE listener under mutex protection. +// Used by Listen() so a concurrent RemoveICEListener cannot race with the +// dispatch loop. 
+func (h *Handshaker) readICEListener() func(*OfferAnswer) { + h.mu.Lock() + defer h.mu.Unlock() + return h.iceListener +} + func (h *Handshaker) Listen(ctx context.Context) { for { select { @@ -124,8 +145,11 @@ func (h *Handshaker) Listen(ctx context.Context) { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil && h.RemoteICESupported() { - h.iceListener(&remoteOfferAnswer) + if iceListener := h.readICEListener(); iceListener != nil && h.RemoteICESupported() { + iceListener(&remoteOfferAnswer) + } else if h.RemoteICESupported() { + h.log.Debugf("remote OFFER (session %s) without local ICE listener (relay-forced mode or peer in ICE backoff)", + remoteOfferAnswer.SessionIDString()) } if err := h.sendAnswer(); err != nil { @@ -146,8 +170,11 @@ func (h *Handshaker) Listen(ctx context.Context) { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil && h.RemoteICESupported() { - h.iceListener(&remoteOfferAnswer) + if iceListener := h.readICEListener(); iceListener != nil && h.RemoteICESupported() { + iceListener(&remoteOfferAnswer) + } else if h.RemoteICESupported() { + h.log.Debugf("remote ANSWER (session %s) without local ICE listener (relay-forced mode or peer in ICE backoff)", + remoteOfferAnswer.SessionIDString()) } case <-ctx.Done(): h.log.Infof("stop listening for remote offers and answers") diff --git a/client/internal/peer/handshaker_test.go b/client/internal/peer/handshaker_test.go new file mode 100644 index 00000000000..fdc95411eb8 --- /dev/null +++ b/client/internal/peer/handshaker_test.go @@ -0,0 +1,50 @@ +package peer + +import ( + "testing" +) + +func TestHandshaker_AddRemoveICEListener(t *testing.T) { + h := &Handshaker{} + listener := func(o *OfferAnswer) {} + + h.AddICEListener(listener) + if h.iceListener == nil { + t.Fatal("iceListener should be set after AddICEListener") + } + + h.RemoveICEListener() + if h.iceListener != nil { + t.Fatal("iceListener should be nil after RemoveICEListener") + } + + // Idempotency: 
removing again is a no-op. + h.RemoveICEListener() + if h.iceListener != nil { + t.Fatal("RemoveICEListener should be idempotent") + } + + // Re-add works. + h.AddICEListener(listener) + if h.iceListener == nil { + t.Fatal("re-adding the listener should work") + } +} + +func TestHandshaker_readICEListener(t *testing.T) { + h := &Handshaker{} + if got := h.readICEListener(); got != nil { + t.Fatal("readICEListener on empty Handshaker should return nil") + } + + listener := func(o *OfferAnswer) {} + h.AddICEListener(listener) + if got := h.readICEListener(); got == nil { + t.Fatal("readICEListener after AddICEListener should return non-nil") + } + + h.RemoveICEListener() + if got := h.readICEListener(); got != nil { + t.Fatal("readICEListener after RemoveICEListener should return nil") + } +} diff --git a/client/internal/peer/ice_backoff.go b/client/internal/peer/ice_backoff.go new file mode 100644 index 00000000000..d435aca10a1 --- /dev/null +++ b/client/internal/peer/ice_backoff.go @@ -0,0 +1,210 @@ +package peer + +import ( + "sync" + "time" + + "github.com/cenkalti/backoff/v4" +) + +const ( + // DefaultP2PRetryMax is the built-in fallback when the management + // server has not pushed a p2p_retry_max_seconds value (Proto wire + // value 0 = "not set"). Phase 3 of #5989. + DefaultP2PRetryMax = 15 * time.Minute + + iceBackoffInitialInterval = 1 * time.Minute + iceBackoffMultiplier = 2.0 + iceBackoffRandomizationFactor = 0.1 + + // networkChangeGracePeriod is the window after Reset() (signal/relay + // reconnect, network-change event) during which markFailure caps the + // suspend delay at networkChangeRetryDelay. Phase 3.7f of #5989. + // + // Rationale: the first ICE pair-check after a network change often + // fails on stale NAT mappings, even when subsequent attempts succeed. + // Falling back to the normal 1-minute initial backoff after that + // single failure leaves the peer on relay for far longer than the + // underlying connectivity actually warrants. 
A short fixed delay + // inside the grace window lets follow-up attempts run while the new + // LTE/Wi-Fi mapping is still fresh; outside the window the normal + // exponential schedule applies as before. + // + // Phase 3.7h widened the window from 30 s to 60 s and reduced the + // retry delay from 5 s to 2 s after observing real-world LTE-bounce + // behaviour: cold NAT mappings often need 3-4 ICE attempts to prime, + // and the previous 30 s window only fit ~2 attempts (each pair-check + // is ~12-15 s) before the schedule jumped to a 1-minute exponential + // suspend. The wider window plus shorter delay typically fits ~4-5 + // attempts and recovers within ~50 s for peers behind a single NAT + // instead of 2-3 minutes. + networkChangeGracePeriod = 60 * time.Second + networkChangeRetryDelay = 2 * time.Second +) + +// iceBackoffState tracks per-peer ICE-failure backoff in p2p-dynamic +// mode. Phase 3 of #5989. +type iceBackoffState struct { + mu sync.Mutex + bo *backoff.ExponentialBackOff + failures int + nextRetry time.Time + suspended bool + maxBackoff time.Duration + lastResetAt time.Time +} + +// BackoffSnapshot is a read-only view used by the status output. 
+type BackoffSnapshot struct { + Failures int + NextRetry time.Time + Suspended bool +} + +func newIceBackoff(maxBackoff time.Duration) *iceBackoffState { + return &iceBackoffState{ + bo: buildBackoff(maxBackoff), + maxBackoff: maxBackoff, + } +} + +func buildBackoff(maxBackoff time.Duration) *backoff.ExponentialBackOff { + bo := backoff.NewExponentialBackOff() + bo.InitialInterval = iceBackoffInitialInterval + bo.Multiplier = iceBackoffMultiplier + bo.RandomizationFactor = iceBackoffRandomizationFactor + bo.MaxInterval = maxBackoff + bo.MaxElapsedTime = 0 + bo.Reset() + return bo +} + +func (s *iceBackoffState) IsSuspended() bool { + s.mu.Lock() + defer s.mu.Unlock() + if !s.suspended { + return false + } + if time.Now().After(s.nextRetry) { + return false + } + return true +} + +// markFailure increments the failure counter and computes the next retry +// time. Returns the delay so callers can log it. If maxBackoff is 0 +// (= disabled), returns 0 and does not modify state. +// +// Phase 3.7f of #5989: while we are still inside networkChangeGracePeriod +// after the most recent Reset() (typically a srReconnect / network-change +// event), the suspend delay is capped at networkChangeRetryDelay and the +// long-term exponential schedule is NOT advanced. Once the grace window +// elapses, normal exponential backoff applies. This lets the second ICE +// pair-check run while a fresh LTE/Wi-Fi NAT mapping is still warm, +// without flooding signaling for chronically broken peers. 
+func (s *iceBackoffState) markFailure() time.Duration { + s.mu.Lock() + defer s.mu.Unlock() + if s.maxBackoff == 0 { + return 0 + } + s.failures++ + + var delay time.Duration + if !s.lastResetAt.IsZero() && time.Since(s.lastResetAt) < networkChangeGracePeriod { + delay = networkChangeRetryDelay + } else { + delay = s.bo.NextBackOff() + } + + s.nextRetry = time.Now().Add(delay) + s.suspended = true + return delay +} + +func (s *iceBackoffState) Snapshot() BackoffSnapshot { + s.mu.Lock() + defer s.mu.Unlock() + return BackoffSnapshot{ + Failures: s.failures, + NextRetry: s.nextRetry, + Suspended: s.suspended && time.Now().Before(s.nextRetry), + } +} + +// markSuccess clears the failure counter and resets the internal backoff +// to its initial interval. Called when pion reports ConnectionStateConnected. +// +// Also stamps lastResetAt: a successful ICE connect is semantically the +// strongest "the path works" signal we have, so the post-network-change +// grace period (markFailure) and the activity-override rate limit +// (AllowActivityOverride) both honour it as a fresh reset point. Codex +// review 2026-05-05 caught the previous miss: without this stamp, +// Reset() and markSuccess() were inconsistent and AllowActivityOverride +// would have allowed an override immediately after a fresh successful +// connect, defeating its rate-limit intent. +func (s *iceBackoffState) markSuccess() { + s.mu.Lock() + defer s.mu.Unlock() + s.failures = 0 + s.suspended = false + s.bo.Reset() + s.lastResetAt = time.Now() +} + +// Reset is the hard reset triggered by interface-change or mode-push. +// In addition to clearing the failure counter and exponential schedule, +// it stamps lastResetAt so that markFailure can apply the +// post-network-change grace period (Phase 3.7f). 
+func (s *iceBackoffState) Reset() { + s.mu.Lock() + defer s.mu.Unlock() + s.failures = 0 + s.suspended = false + s.bo.Reset() + s.lastResetAt = time.Now() +} + +// activityOverrideMinInterval bounds how often relay-state activity +// can override an active ICE-failure backoff. The backoff exists to +// protect signal-server load against truly broken paths; user activity +// however is the strongest "I want this peer back" signal we have, so +// we allow ONE override per this window per peer. 5 min lines up with +// the relayTimeout default -- after one override window the conn would +// have cycled to Idle anyway, freeing the backoff via the C->A wake +// path which already does ResetIceBackoff. +const activityOverrideMinInterval = 5 * time.Minute + +// AllowActivityOverride returns true if a relay-state activity event +// is permitted to bypass an active backoff suspension. The caller is +// expected to call Reset() afterwards if true is returned. Guards +// against signal-storm by enforcing activityOverrideMinInterval since +// the last (success-, network-change-, or override-driven) reset. +// +// Phase 3.7i (#5989), Codex review 2026-05-05 point 5: "Optional +// maximal ein sehr bewusstes 'user activity retry override' mit harter +// Rate-Limitierung". This is that override, gated to once per 5min. +func (s *iceBackoffState) AllowActivityOverride() bool { + s.mu.Lock() + defer s.mu.Unlock() + if !s.suspended { + return false // not in backoff, nothing to override + } + if time.Since(s.lastResetAt) < activityOverrideMinInterval { + return false // too soon since last reset, respect rate limit + } + return true +} + +// SetMaxBackoff updates the cap. Called from ConnMgr.UpdatedRemotePeerConfig +// when the server pushes a new value. Rebuilds the internal backoff with +// the new schedule but preserves the failure counter. 
+func (s *iceBackoffState) SetMaxBackoff(d time.Duration) { + s.mu.Lock() + defer s.mu.Unlock() + if d == s.maxBackoff { + return + } + s.maxBackoff = d + s.bo = buildBackoff(d) +} diff --git a/client/internal/peer/ice_backoff_test.go b/client/internal/peer/ice_backoff_test.go new file mode 100644 index 00000000000..5bca1dc32bb --- /dev/null +++ b/client/internal/peer/ice_backoff_test.go @@ -0,0 +1,282 @@ +package peer + +import ( + "testing" + "time" +) + +// TestIceBackoff_AllowActivityOverride pins down the rate-limited +// "user-activity-overrides-hourly-backoff" semantic added 2026-05-05. +// Codex review caught that markSuccess() previously did NOT stamp +// lastResetAt, so this test specifically also covers the post-success +// path -- without the markSuccess fix the rate-limit window would have +// effectively never engaged after a brief successful connect cycle. +func TestIceBackoff_AllowActivityOverride(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + + // Not suspended -> no override needed + if s.AllowActivityOverride() { + t.Fatal("not suspended: must NOT allow override") + } + + // Suspended via markFailure + for i := 0; i < 3; i++ { + s.markFailure() + } + if !s.IsSuspended() { + t.Fatal("after 3 failures: must be suspended") + } + + // Recently reset (Reset just happened in newIceBackoff bo, but + // lastResetAt is zero — falls back to time.Since(zero) = forever + // which IS > 5min, so override IS allowed). To make the test + // deterministic, hard-Reset to stamp lastResetAt = now, then + // re-fail 3x to suspend. 
+ s.Reset() + for i := 0; i < 3; i++ { + s.markFailure() + } + if !s.IsSuspended() { + t.Fatal("after Reset+3 failures: must be suspended") + } + // Now lastResetAt is fresh (within 5min) -> override DENIED + if s.AllowActivityOverride() { + t.Fatal("recently reset: must NOT allow override (rate-limit)") + } + + // Simulate >5min since last reset by stamping lastResetAt back + s.mu.Lock() + s.lastResetAt = time.Now().Add(-6 * time.Minute) + s.mu.Unlock() + if !s.AllowActivityOverride() { + t.Fatal("suspended + last reset >5min ago: MUST allow override") + } +} + +// TestIceBackoff_OnlyMarkFailureMutates pins the invariant Codex review +// 2026-05-05 asked us to make explicit: the backoff state is mutated +// by exactly three methods (markFailure, markSuccess, Reset) and by +// nothing else. In particular, the backoff must NEVER be triggered by +// inactivity-driven ICE-detach (DetachICEForPeer / lazy-mgr's +// ICEInactiveChan) or by full-conn-close (lazy-mgr relayTimeout). +// +// Test approach: spin a backoff, exercise the read-only paths +// (Snapshot, IsSuspended, AllowActivityOverride) repeatedly, then +// assert failures stayed at 0 and suspended stayed false. This proves +// that the read methods don't have side-effects that would +// accidentally enter the backoff state. 
+func TestIceBackoff_OnlyMarkFailureMutates(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + + for i := 0; i < 20; i++ { + _ = s.IsSuspended() + _ = s.Snapshot() + _ = s.AllowActivityOverride() + } + + if s.IsSuspended() { + t.Fatal("backoff must not be suspended after read-only calls") + } + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("read-only calls must not mutate state, got %+v", snap) + } +} + +// TestIceBackoff_MarkSuccessStampsLastResetAt is a direct regression +// pin for the Codex-found inconsistency: markSuccess MUST update +// lastResetAt so it counts as a reset point for the +// activity-override rate limit (and the markFailure grace period). +func TestIceBackoff_MarkSuccessStampsLastResetAt(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + // Force lastResetAt into the past + s.mu.Lock() + s.lastResetAt = time.Now().Add(-30 * time.Minute) + s.mu.Unlock() + + s.markSuccess() + + s.mu.Lock() + stamped := s.lastResetAt + s.mu.Unlock() + if time.Since(stamped) > time.Second { + t.Fatalf("markSuccess must stamp lastResetAt to ~now, got %v ago", time.Since(stamped)) + } +} + + +func TestIceBackoff_InitialState(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + if s.IsSuspended() { + t.Fatal("fresh state must not be suspended") + } + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("fresh state snapshot wrong: %+v", snap) + } +} + +func TestIceBackoff_SetMaxBackoff_Live(t *testing.T) { + s := newIceBackoff(1 * time.Minute) // tight cap + s.markFailure() // expect ~1m + s.markFailure() // expect ~1m (capped) + d2 := s.markFailure() // still ~1m + if d2 > 90*time.Second { + t.Errorf("with 1m cap, third failure should be ~1m, got %v", d2) + } + // Live update to 1h cap + s.SetMaxBackoff(60 * time.Minute) + // Subsequent failure produces a non-zero delay (jitter-dependent + // but should be > 0 since backoff was rebuilt). 
+ d3 := s.markFailure() + if d3 <= 0 { + t.Errorf("after SetMaxBackoff: must produce non-zero delay, got %v", d3) + } +} + +func TestIceBackoff_SuccessReset(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + for i := 0; i < 5; i++ { + s.markFailure() + } + s.markSuccess() + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after markSuccess: %+v", snap) + } + // Next failure must be back to step-1 magnitude (~1m) + delay := s.markFailure() + if delay > 70*time.Second { + t.Errorf("after success-reset, first failure must restart at ~1m, got %v", delay) + } +} + +func TestIceBackoff_HardReset(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.markFailure() + s.markFailure() + s.Reset() + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after Reset: %+v", snap) + } +} + +func TestIceBackoff_SuspendedExpires(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.markFailure() + // Force nextRetry to past + s.mu.Lock() + s.nextRetry = time.Now().Add(-1 * time.Second) + s.mu.Unlock() + if s.IsSuspended() { + t.Fatal("expired suspend must report not suspended") + } +} + +func TestIceBackoff_ExponentialDoubling(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + expectedRanges := []struct { + min, max time.Duration + }{ + {50 * time.Second, 70 * time.Second}, // ~1m + {100 * time.Second, 140 * time.Second}, // ~2m + {210 * time.Second, 270 * time.Second}, // ~4m + {420 * time.Second, 540 * time.Second}, // ~8m + {810 * time.Second, 990 * time.Second}, // ~15m capped + {810 * time.Second, 990 * time.Second}, // ~15m capped + {810 * time.Second, 990 * time.Second}, // ~15m capped + } + for i, exp := range expectedRanges { + delay := s.markFailure() + if delay < exp.min || delay > exp.max { + t.Errorf("failure #%d: delay %v outside expected range [%v, %v]", + i+1, delay, exp.min, exp.max) + } + } +} + +func TestIceBackoff_MaxBackoffOverride(t *testing.T) { + s := newIceBackoff(5 * time.Minute) // 
300s cap + delays := []time.Duration{} + for i := 0; i < 5; i++ { + delays = append(delays, s.markFailure()) + } + // Last few should be capped at ~5m (300s) regardless of multiplier + for i := 2; i < 5; i++ { + if delays[i] > 6*time.Minute { + t.Errorf("failure #%d: delay %v exceeds 5m cap", i+1, delays[i]) + } + } +} + +func TestIceBackoff_MaxBackoffZero_Disabled(t *testing.T) { + s := newIceBackoff(0) + delay := s.markFailure() + if delay != 0 { + t.Errorf("disabled backoff must return 0 delay, got %v", delay) + } + if s.IsSuspended() { + t.Fatal("disabled backoff must not suspend") + } +} + +func TestIceBackoff_GracePeriodAfterReset_ShortDelay(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.Reset() // simulate srReconnect / network-change + + delay := s.markFailure() + if delay != networkChangeRetryDelay { + t.Fatalf("within grace window: expected %v, got %v", networkChangeRetryDelay, delay) + } + + // A second failure inside the grace window also uses the short delay + // (long-term exponential schedule is NOT advanced). + delay2 := s.markFailure() + if delay2 != networkChangeRetryDelay { + t.Fatalf("second failure inside grace: expected %v, got %v", networkChangeRetryDelay, delay2) + } +} + +func TestIceBackoff_GraceExpired_NormalExponential(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.Reset() + + // Force lastResetAt into the past so the grace window has expired. + s.mu.Lock() + s.lastResetAt = time.Now().Add(-2 * networkChangeGracePeriod) + s.mu.Unlock() + + delay := s.markFailure() + if delay < 50*time.Second || delay > 70*time.Second { + t.Fatalf("outside grace: expected ~1m exponential delay, got %v", delay) + } +} + +func TestIceBackoff_NoGraceWithoutReset(t *testing.T) { + // Fresh state without an explicit Reset must use the normal exponential + // schedule (lastResetAt is zero so the grace path does not apply). 
+ s := newIceBackoff(15 * time.Minute) + delay := s.markFailure() + if delay < 50*time.Second { + t.Fatalf("fresh state without Reset: expected ~1m delay, got %v", delay) + } +} + +func TestIceBackoff_FirstFailure(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + delay := s.markFailure() + if delay <= 0 { + t.Fatalf("first failure must produce a positive delay, got %v", delay) + } + if delay < 50*time.Second || delay > 70*time.Second { + t.Fatalf("first failure delay should be ~1m (with 10%% jitter), got %v", delay) + } + if !s.IsSuspended() { + t.Fatal("after first failure must be suspended") + } + snap := s.Snapshot() + if snap.Failures != 1 || !snap.Suspended { + t.Fatalf("snapshot wrong: %+v", snap) + } +} diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index e8e61f660c9..4136acf1a8c 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -70,6 +70,41 @@ type State struct { RosenpassEnabled bool SSHHostKey []byte routes map[string]struct{} + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int + IceBackoffNextRetry time.Time + IceBackoffSuspended bool + // Phase 3.7i (#5989): timestamp of the last Idle→Connected + // transition that came up via the relay path. Used by the UI- + // label deriver to detect the brief "Relayed but ICE still + // trying" window after a wakeup so UIs can render that as + // "Relayed (negotiating P2P)" instead of plain "Relayed". + RelayActivatedAt time.Time + // Phase 3.7i (#5989): true = peer is in d.peers; false = in d.offlinePeers. 
+ ServerOnline bool + RemoteEffectiveConnectionMode string + RemoteConfiguredConnectionMode string + RemoteEffectiveRelayTimeoutSecs uint32 + RemoteEffectiveP2PTimeoutSecs uint32 + RemoteEffectiveP2PRetryMaxSecs uint32 + RemoteConfiguredRelayTimeoutSecs uint32 + RemoteConfiguredP2PTimeoutSecs uint32 + RemoteConfiguredP2PRetryMaxSecs uint32 + RemoteGroups []string + RemoteLastSeenAtServer time.Time + // Phase 3.7i (#5989): live mgmt-server-tracked liveness flag from + // RemotePeerConfig.LiveOnline (= peer.Status.Connected on the server). + // True = peer is currently heartbeating to mgmt; false = configured + // but currently unreachable (hardware/network down). Used by the + // counter widget to distinguish "online" from "offline" in the + // user-intuitive sense, independent of the login-expiration split. + RemoteLiveOnline bool + // RemoteServerLivenessKnown is the explicit "I authoritatively know + // this peer's liveness" marker from a phase-3.7i+ management server. + // Old servers leave this false and the counter falls back to its + // LastSeenAtServer-zero heuristic; new servers set it true so the + // counter trusts RemoteLiveOnline directly. + RemoteServerLivenessKnown bool } // AddRoute add a single route to routes map @@ -160,6 +195,13 @@ type FullStatus struct { NumOfForwardingRules int LazyConnectionEnabled bool Events []*proto.SystemEvent + // Phase 3.7i (#5989): aggregate counters. + ConfiguredPeersTotal uint32 + ServerOnlinePeers uint32 + P2PConnectedPeers uint32 + RelayedConnectedPeers uint32 + IdleOnlinePeers uint32 + ServerOfflinePeers uint32 } type StatusChangeSubscription struct { @@ -219,6 +261,11 @@ type Status struct { routeIDLookup routeIDLookup wgIface WGIfaceStatus + + // Phase 3.7i (#5989): per-peer state-change subscription. Set by + // Engine; nil-checked everywhere. Fired AFTER releasing d.mux to + // avoid holding the lock through user code. 
+ connStateListener func(pubkey string, st State) } // NewRecorder returns a new Status instance @@ -235,6 +282,59 @@ func NewRecorder(mgmAddress string) *Status { } } +// SetConnStateListener registers a callback that is called after each +// meaningful per-peer connection-state transition. The callback is +// invoked AFTER d.mux is released (Extract-Method pattern). Safe to +// call concurrently; may be set to nil to unregister. +// Phase 3.7i of #5989. +func (d *Status) SetConnStateListener(fn func(pubkey string, st State)) { + d.mux.Lock() + d.connStateListener = fn + d.mux.Unlock() +} + +// notifyConnStateChange returns a closure the caller invokes AFTER +// unlocking d.mux to deliver the state to the listener without holding +// the lock through user code. Caller must hold d.mux when calling this. +// Returns a no-op closure when no listener is registered. +func (d *Status) notifyConnStateChange(peerPubKey string, peerState State) func() { + listener := d.connStateListener + if listener == nil { + return func() {} + } + stateCopy := peerState + return func() { listener(peerPubKey, stateCopy) } +} + +// notifyPeerListChanged fires a peer-list-changed notification using the +// current peer count. Phase 3.7i: thin wrapper around the notifier so +// callers in UpdatePeerRemoteMeta and similar paths don't need to know +// about d.numOfPeers() and d.notifier internals. +// +// Caller must hold d.mux (this method reads d.peers/d.offlinePeers via +// numOfPeers and assumes consistent state). +// +//nolint:unused // wired up in a follow-up commit (UpdatePeerRemoteMeta path) +func (d *Status) notifyPeerListChanged() { + d.notifier.peerListChanged(d.numOfPeers()) +} + +// notifyPeerStateChangeListeners snapshots the per-peer router-state for +// peerID under the lock and dispatches it to registered subscribers in +// a goroutine, so the dispatch itself does not block on d.mux. 
Called +// when a peer's UI-relevant fields (LiveOnline, EffectiveConnectionMode, +// material ICE/Relay change) flip and subscribers need an immediate +// push instead of waiting for the next periodic poll. Phase 3.7i. +// +// Caller must hold d.mux when calling this. +func (d *Status) notifyPeerStateChangeListeners(peerID string) { + snapshot := d.snapshotRouterPeersLocked(peerID, true) + if snapshot == nil { + return + } + go d.dispatchRouterPeers(peerID, snapshot) +} + func (d *Status) SetRelayMgr(manager *relayClient.Manager) { d.mux.Lock() defer d.mux.Unlock() @@ -319,12 +419,21 @@ func (d *Status) RemovePeer(peerPubKey string) error { // UpdatePeerState updates peer status func (d *Status) UpdatePeerState(receivedState State) error { + notifyFn, err := d.updatePeerStateLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerStateLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } oldState := peerState.ConnStatus @@ -357,7 +466,121 @@ func (d *Status) UpdatePeerState(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + + if hasConnStatusChanged(oldState, receivedState.ConnStatus) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil +} + +// UpdatePeerIceBackoff updates the ICE-backoff snapshot for a peer. +// Called by Conn.onICEFailed / onICEConnected so that the daemon +// status reflects current backoff state. Phase 3 of #5989. 
+func (d *Status) UpdatePeerIceBackoff(pubKey string, snap BackoffSnapshot) { + d.mux.Lock() + defer d.mux.Unlock() + + peerState, ok := d.peers[pubKey] + if !ok { + return + } + peerState.IceBackoffFailures = snap.Failures + peerState.IceBackoffNextRetry = snap.NextRetry + peerState.IceBackoffSuspended = snap.Suspended + d.peers[pubKey] = peerState +} + +// RemoteMeta is the slice of per-peer fields RemotePeerConfig populates. +// Phase 3.7i of #5989. +type RemoteMeta struct { + EffectiveConnectionMode string + EffectiveRelayTimeoutSecs uint32 + EffectiveP2PTimeoutSecs uint32 + EffectiveP2PRetryMaxSecs uint32 + ConfiguredConnectionMode string + ConfiguredRelayTimeoutSecs uint32 + ConfiguredP2PTimeoutSecs uint32 + ConfiguredP2PRetryMaxSecs uint32 + Groups []string + LastSeenAtServer time.Time + LiveOnline bool + ServerLivenessKnown bool +} + +// UpdatePeerRemoteMeta sets the RemotePeerConfig-derived fields on the +// peer's State without touching ConnStatus or transport stats. Looks up +// the peer in both online (d.peers) and offline (d.offlinePeers) maps. +// +// Phase 3.7i (Codex finding 3): when a UI-relevant field flips +// (LiveOnline, ServerLivenessKnown, EffectiveConnectionMode) we fire +// notifyPeerListChanged so the Android home/peers fragments — which +// only refresh on OnPeersListChanged — pick up the change immediately. +// Otherwise the user sees the change at most ~30 s later when the +// daemon-RPC poller next runs. 
+func (d *Status) UpdatePeerRemoteMeta(pubKey string, meta RemoteMeta) error { + d.mux.Lock() + defer d.mux.Unlock() + var notify bool + st, online := d.peers[pubKey] + if online { + notify = st.RemoteLiveOnline != meta.LiveOnline || + st.RemoteServerLivenessKnown != meta.ServerLivenessKnown || + st.RemoteEffectiveConnectionMode != meta.EffectiveConnectionMode + st.RemoteEffectiveConnectionMode = meta.EffectiveConnectionMode + st.RemoteConfiguredConnectionMode = meta.ConfiguredConnectionMode + st.RemoteEffectiveRelayTimeoutSecs = meta.EffectiveRelayTimeoutSecs + st.RemoteEffectiveP2PTimeoutSecs = meta.EffectiveP2PTimeoutSecs + st.RemoteEffectiveP2PRetryMaxSecs = meta.EffectiveP2PRetryMaxSecs + st.RemoteConfiguredRelayTimeoutSecs = meta.ConfiguredRelayTimeoutSecs + st.RemoteConfiguredP2PTimeoutSecs = meta.ConfiguredP2PTimeoutSecs + st.RemoteConfiguredP2PRetryMaxSecs = meta.ConfiguredP2PRetryMaxSecs + st.RemoteGroups = meta.Groups + st.RemoteLastSeenAtServer = meta.LastSeenAtServer + st.RemoteLiveOnline = meta.LiveOnline + st.RemoteServerLivenessKnown = meta.ServerLivenessKnown + d.peers[pubKey] = st + if notify { + d.notifyPeerListChanged() + d.notifyPeerStateChangeListeners(pubKey) + } + return nil + } + for i := range d.offlinePeers { + if d.offlinePeers[i].PubKey == pubKey { + notify = d.offlinePeers[i].RemoteLiveOnline != meta.LiveOnline || + d.offlinePeers[i].RemoteServerLivenessKnown != meta.ServerLivenessKnown || + d.offlinePeers[i].RemoteEffectiveConnectionMode != meta.EffectiveConnectionMode + d.offlinePeers[i].RemoteEffectiveConnectionMode = meta.EffectiveConnectionMode + d.offlinePeers[i].RemoteConfiguredConnectionMode = meta.ConfiguredConnectionMode + d.offlinePeers[i].RemoteEffectiveRelayTimeoutSecs = meta.EffectiveRelayTimeoutSecs + d.offlinePeers[i].RemoteEffectiveP2PTimeoutSecs = meta.EffectiveP2PTimeoutSecs + d.offlinePeers[i].RemoteEffectiveP2PRetryMaxSecs = meta.EffectiveP2PRetryMaxSecs + d.offlinePeers[i].RemoteConfiguredRelayTimeoutSecs = 
meta.ConfiguredRelayTimeoutSecs + d.offlinePeers[i].RemoteConfiguredP2PTimeoutSecs = meta.ConfiguredP2PTimeoutSecs + d.offlinePeers[i].RemoteConfiguredP2PRetryMaxSecs = meta.ConfiguredP2PRetryMaxSecs + d.offlinePeers[i].RemoteGroups = meta.Groups + d.offlinePeers[i].RemoteLastSeenAtServer = meta.LastSeenAtServer + d.offlinePeers[i].RemoteLiveOnline = meta.LiveOnline + d.offlinePeers[i].RemoteServerLivenessKnown = meta.ServerLivenessKnown + if notify { + d.notifyPeerListChanged() + } + return nil + } + } + return fmt.Errorf("peer %s not found in either map", pubKey) +} + +// TimestampOrZero converts a *timestamppb.Timestamp to time.Time, +// returning zero-time when the proto pointer is nil. Used by engine.go +// (Task 3.3) when populating RemoteMeta from RemotePeerConfig where +// last_seen_at_server may be unset for peers that pre-date Phase 3.7i. +// Phase 3.7i of #5989. +func TimestampOrZero(t *timestamppb.Timestamp) time.Time { + if t == nil { + return time.Time{} + } + return t.AsTime() } func (d *Status) AddPeerStateRoute(peer string, route string, resourceId route.ResID) error { @@ -421,16 +644,25 @@ func (d *Status) CheckRoutes(ip netip.Addr) ([]byte, bool) { } func (d *Status) UpdatePeerICEState(receivedState State) error { + notifyFn, err := d.updatePeerICEStateLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerICEStateLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } - oldState := peerState.ConnStatus - oldIsRelayed := peerState.Relayed + oldSnapshot := peerState + oldStatus := peerState.ConnStatus peerState.ConnStatus = receivedState.ConnStatus peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate @@ -443,10 +675,11 @@ func (d *Status) UpdatePeerICEState(receivedState State) error { 
d.peers[receivedState.PubKey] = peerState - notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) - notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + notifyList := hasConnStatusChanged(oldStatus, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) numPeers := d.numOfPeers() + materialICE := hasMaterialICEChange(oldSnapshot, peerState) d.mux.Unlock() @@ -456,20 +689,38 @@ func (d *Status) UpdatePeerICEState(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + if materialICE { + d.notifyPeerStateChangeListeners(receivedState.PubKey) + } + + if hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil } func (d *Status) UpdatePeerRelayedState(receivedState State) error { + notifyFn, err := d.updatePeerRelayedStateLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerRelayedStateLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } - oldState := peerState.ConnStatus - oldIsRelayed := peerState.Relayed + oldSnapshot := peerState + oldStatus := peerState.ConnStatus + oldRelayed := peerState.Relayed + wasIdle := oldStatus == StatusIdle peerState.ConnStatus = receivedState.ConnStatus peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate @@ -477,12 +728,26 @@ func (d *Status) UpdatePeerRelayedState(receivedState State) error { peerState.RelayServerAddress = 
receivedState.RelayServerAddress peerState.RosenpassEnabled = receivedState.RosenpassEnabled + // Phase 3.7i: track the moment we transitioned from Idle to + // Connected-via-Relay so the UI-label deriver can detect the brief + // window where ICE is still trying to upgrade us to P2P. The + // transition fires on Idle->Connected when relayed=true (the + // typical wakeup path: relay opens fast, ICE follows). Plain + // not-relayed->relayed (e.g. ICE failure mid-session) also stamps + // so the label can show the negotiation attempt for those too. + relayedNow := receivedState.Relayed && receivedState.ConnStatus == StatusConnected + relayedBefore := oldRelayed && oldStatus == StatusConnected + if relayedNow && (wasIdle || !relayedBefore) { + peerState.RelayActivatedAt = time.Now() + } + d.peers[receivedState.PubKey] = peerState - notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) - notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + notifyList := hasConnStatusChanged(oldStatus, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) numPeers := d.numOfPeers() + materialRelay := hasMaterialRelayChange(oldSnapshot, peerState) d.mux.Unlock() @@ -492,16 +757,32 @@ func (d *Status) UpdatePeerRelayedState(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + if materialRelay { + d.notifyPeerStateChangeListeners(receivedState.PubKey) + } + + if hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil } func (d *Status) UpdatePeerRelayedStateToDisconnected(receivedState State) error { + notifyFn, err := 
d.updatePeerRelayedStateToDisconnectedLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerRelayedStateToDisconnectedLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } oldState := peerState.ConnStatus @@ -527,16 +808,29 @@ func (d *Status) UpdatePeerRelayedStateToDisconnected(receivedState State) error if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + + if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil } func (d *Status) UpdatePeerICEStateToDisconnected(receivedState State) error { + notifyFn, err := d.updatePeerICEStateToDisconnectedLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerICEStateToDisconnectedLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } oldState := peerState.ConnStatus @@ -565,7 +859,11 @@ func (d *Status) UpdatePeerICEStateToDisconnected(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + + if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil } // UpdateWireGuardPeerState updates the WireGuard bits of the peer state @@ -595,6 +893,47 @@ func hasConnStatusChanged(oldStatus, newStatus ConnStatus) bool { return newStatus != oldStatus } +// hasMaterialICEChange returns 
true when any field that the management +// server's "endpoint flips immediate" UX promise depends on has moved. +// Beyond the status/relayed flip already covered by hasStatusOrRelayedChange, +// this catches: +// - Local/remote ICE candidate endpoint changes (NAT-traversal roaming) +// - Local/remote ICE candidate type changes (host -> srflx -> relay) +// +// Without this an in-place endpoint flip would only surface to the +// dashboard at the next 60 s heartbeat tick. +func hasMaterialICEChange(oldState, newState State) bool { + if hasStatusOrRelayedChange(oldState.ConnStatus, newState.ConnStatus, oldState.Relayed, newState.Relayed) { + return true + } + if oldState.LocalIceCandidateEndpoint != newState.LocalIceCandidateEndpoint { + return true + } + if oldState.RemoteIceCandidateEndpoint != newState.RemoteIceCandidateEndpoint { + return true + } + if oldState.LocalIceCandidateType != newState.LocalIceCandidateType { + return true + } + if oldState.RemoteIceCandidateType != newState.RemoteIceCandidateType { + return true + } + return false +} + +// hasMaterialRelayChange returns true when relayed-state material fields +// have changed. Beyond status/relayed, this catches relay-server flips +// (a peer being moved to a different relay endpoint). 
+func hasMaterialRelayChange(oldState, newState State) bool { + if hasStatusOrRelayedChange(oldState.ConnStatus, newState.ConnStatus, oldState.Relayed, newState.Relayed) { + return true + } + if oldState.RelayServerAddress != newState.RelayServerAddress { + return true + } + return false +} + // UpdatePeerFQDN update peer's state fqdn only func (d *Status) UpdatePeerFQDN(peerPubKey, fqdn string) error { d.mux.Lock() @@ -1042,11 +1381,63 @@ func (d *Status) GetFullStatus() FullStatus { fullStatus.LocalPeerState = d.localPeer + var p2p, relayed, idle, offline uint32 + + // Phase 3.7i (#5989) counter semantics: + // ServerOnline := peer.Status.Connected on the management server + // (RemotePeerConfig.live_online → State.RemoteLiveOnline) + // Offline := configured but NOT live (heartbeat is stale OR + // login expired). For login-expired peers, the + // daemon already places them in d.offlinePeers via + // updateOfflinePeers; the rest live in d.peers + // regardless of their live status, so we additionally + // check RemoteLiveOnline. + // + // Backward-compat fallback: if the management server pre-dates + // Phase 3.7i, RemoteServerLivenessKnown is false (zero value of the + // never-populated proto field). In that case we cannot trust + // LiveOnline so we fall back to the legacy heuristic: assume online + // unless LastSeenAtServer is set AND LiveOnline is explicitly false. + // Phase-3.7i+ servers set ServerLivenessKnown=true and we then trust + // LiveOnline directly — both for "yes online" and "no offline". 
for _, status := range d.peers { + var isLive bool + if status.RemoteServerLivenessKnown { + isLive = status.RemoteLiveOnline + } else { + mgmtKnowsLiveness := !status.RemoteLastSeenAtServer.IsZero() + isLive = status.RemoteLiveOnline || !mgmtKnowsLiveness + } + if isLive { + status.ServerOnline = true + switch { + case status.ConnStatus == StatusConnected && !status.Relayed: + p2p++ + case status.ConnStatus == StatusConnected && status.Relayed: + relayed++ + default: + idle++ + } + } else { + status.ServerOnline = false + offline++ + } + fullStatus.Peers = append(fullStatus.Peers, status) + } + for _, status := range d.offlinePeers { + // Login-expired peers are always offline. + status.ServerOnline = false + offline++ fullStatus.Peers = append(fullStatus.Peers, status) } - fullStatus.Peers = append(fullStatus.Peers, d.offlinePeers...) + fullStatus.P2PConnectedPeers = p2p + fullStatus.RelayedConnectedPeers = relayed + fullStatus.IdleOnlinePeers = idle + fullStatus.ServerOfflinePeers = offline + fullStatus.ServerOnlinePeers = p2p + relayed + idle + fullStatus.ConfiguredPeersTotal = fullStatus.ServerOnlinePeers + offline + fullStatus.Events = d.GetEventHistory() return fullStatus } @@ -1324,6 +1715,14 @@ func (fs FullStatus) ToProto() *proto.FullStatus { pbFullStatus.NumberOfForwardingRules = int32(fs.NumOfForwardingRules) pbFullStatus.LazyConnectionEnabled = fs.LazyConnectionEnabled + // Phase 3.7i (#5989): aggregate counters. 
+ pbFullStatus.ConfiguredPeersTotal = fs.ConfiguredPeersTotal + pbFullStatus.ServerOnlinePeers = fs.ServerOnlinePeers + pbFullStatus.P2PConnectedPeers = fs.P2PConnectedPeers + pbFullStatus.RelayedConnectedPeers = fs.RelayedConnectedPeers + pbFullStatus.IdleOnlinePeers = fs.IdleOnlinePeers + pbFullStatus.ServerOfflinePeers = fs.ServerOfflinePeers + pbFullStatus.LocalPeerState.Networks = maps.Keys(fs.LocalPeerState.Routes) for _, peerState := range fs.Peers { @@ -1348,6 +1747,24 @@ func (fs FullStatus) ToProto() *proto.FullStatus { Networks: networks, Latency: durationpb.New(peerState.Latency), SshHostKey: peerState.SSHHostKey, + IceBackoffFailures: int32(peerState.IceBackoffFailures), + IceBackoffNextRetry: timestamppb.New(peerState.IceBackoffNextRetry), + IceBackoffSuspended: peerState.IceBackoffSuspended, + // Phase 3.7i (#5989): per-peer remote meta fields. + ServerOnline: peerState.ServerOnline, + Groups: peerState.RemoteGroups, + EffectiveConnectionMode: peerState.RemoteEffectiveConnectionMode, + ConfiguredConnectionMode: peerState.RemoteConfiguredConnectionMode, + EffectiveRelayTimeoutSecs: peerState.RemoteEffectiveRelayTimeoutSecs, + EffectiveP2PTimeoutSecs: peerState.RemoteEffectiveP2PTimeoutSecs, + EffectiveP2PRetryMaxSecs: peerState.RemoteEffectiveP2PRetryMaxSecs, + ConfiguredRelayTimeoutSecs: peerState.RemoteConfiguredRelayTimeoutSecs, + ConfiguredP2PTimeoutSecs: peerState.RemoteConfiguredP2PTimeoutSecs, + ConfiguredP2PRetryMaxSecs: peerState.RemoteConfiguredP2PRetryMaxSecs, + ConnectionTypeExtended: DeriveConnectionTypeExtended(peerState), + } + if !peerState.RemoteLastSeenAtServer.IsZero() { + pbPeerState.LastSeenAtServer = timestamppb.New(peerState.RemoteLastSeenAtServer) } pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) } @@ -1387,3 +1804,37 @@ func (fs FullStatus) ToProto() *proto.FullStatus { return &pbFullStatus } + +// negotiationWindow is how long after Idle->Connected-via-Relay we +// continue to label the connection as "Relayed 
(negotiating P2P)". +// After this window, if ICE has not succeeded, the label settles to +// plain "Relayed". 5 s comfortably covers typical pion-ICE handshake +// times (gather candidates -> connectivity check -> pair selected) on +// LAN and most cellular setups. +const negotiationWindow = 5 * time.Second + +// DeriveConnectionTypeExtended produces the UI-friendly connection- +// type string that all clients render verbatim. See the proto field +// docstring for the full value list. Centralizing the derivation in +// the daemon keeps Android/Windows/Dashboard consistent without each +// client re-implementing the (relayed && ice-still-trying) heuristic. +func DeriveConnectionTypeExtended(s State) string { + if s.ConnStatus != StatusConnected { + return "" + } + if !s.Relayed { + return "P2P" + } + // Relayed=true. Either ICE is still negotiating (transient) or it + // has given up (permanent for this session). IceBackoffSuspended + // means iceBackoff has scheduled a future retry — i.e. ICE failed + // at least once and is in cool-down, so we should NOT label this + // as "negotiating". + if s.IceBackoffSuspended || s.IceBackoffFailures > 0 { + return "Relayed" + } + if !s.RelayActivatedAt.IsZero() && time.Since(s.RelayActivatedAt) < negotiationWindow { + return "Relayed (negotiating P2P)" + } + return "Relayed" +} diff --git a/client/internal/peer/status_debounce_test.go b/client/internal/peer/status_debounce_test.go new file mode 100644 index 00000000000..79b7c67ea43 --- /dev/null +++ b/client/internal/peer/status_debounce_test.go @@ -0,0 +1,149 @@ +package peer + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// Codex hardening: while the engine is in the offline-debounce +// window, the per-peer Status must remain consistent with the +// daemon's actual local connection state. 
Specifically: if the local +// conn is still alive (no Close() yet), peer status must NOT +// prematurely flip to Idle/Disconnected just because liveness flipped +// false. + +func TestStatus_DuringOfflineDebounce_LocalConnStateUnchanged(t *testing.T) { + key := "test-peer-key" + ip := "10.10.10.10" + fqdn := "peer.example.local" + rec := NewRecorder("https://mgm") + if err := rec.AddPeer(key, fqdn, ip); err != nil { + t.Fatalf("AddPeer: %v", err) + } + + // Simulate: peer is connected via P2P, mgmt told us peer is live. + _ = rec.UpdatePeerICEState(State{ + PubKey: key, + ConnStatus: StatusConnected, + ConnStatusUpdate: time.Now(), + Relayed: false, + LocalIceCandidateType: "host", + RemoteIceCandidateType: "host", + LocalIceCandidateEndpoint: "192.168.91.154:51820", + RemoteIceCandidateEndpoint: "192.168.91.103:51820", + }) + _ = rec.UpdatePeerRemoteMeta(key, RemoteMeta{ + LiveOnline: true, + ServerLivenessKnown: true, + }) + + state, err := rec.GetPeer(key) + assert.NoError(t, err) + assert.Equal(t, StatusConnected, state.ConnStatus, "baseline: must be Connected") + assert.False(t, state.Relayed, "baseline: must be P2P (not relayed)") + assert.True(t, state.RemoteLiveOnline, "baseline: must be live") + + // Mgmt push: peer flipped to live=false. Status recorder MUST keep + // reporting the local conn as Connected/P2P -- the engine's + // debounce timer is what closes the conn (after 5 s grace), + // not the StatusRecorder. Until conn.Close fires, the daemon + // should answer status queries with the still-live transport. 
+ _ = rec.UpdatePeerRemoteMeta(key, RemoteMeta{ + LiveOnline: false, + ServerLivenessKnown: true, + }) + + state, err = rec.GetPeer(key) + assert.NoError(t, err) + assert.Equal(t, StatusConnected, state.ConnStatus, "during debounce: ConnStatus must remain Connected") + assert.False(t, state.Relayed, "during debounce: Relayed must remain false") + assert.False(t, state.RemoteLiveOnline, "during debounce: liveness must reflect mgmt update") + assert.True(t, state.RemoteServerLivenessKnown, "during debounce: livenessKnown stays true") +} + +// After the engine actually closes the conn (e.g. debounce expired), +// the per-peer status should reflect the local-conn-closed state. +// The transition from Connected->Idle is driven by Conn.Close calling +// setStatusToDisconnected which calls UpdatePeerState(StatusIdle). +func TestStatus_AfterDebouncedClose_StatusReflectsLocalIdle(t *testing.T) { + key := "test-peer-key" + ip := "10.10.10.10" + fqdn := "peer.example.local" + rec := NewRecorder("https://mgm") + if err := rec.AddPeer(key, fqdn, ip); err != nil { + t.Fatalf("AddPeer: %v", err) + } + + // Connected state. + _ = rec.UpdatePeerICEState(State{ + PubKey: key, + ConnStatus: StatusConnected, + ConnStatusUpdate: time.Now(), + Relayed: false, + }) + + // Engine: debounce expired, conn.Close fired, status flips Idle. + _ = rec.UpdatePeerState(State{ + PubKey: key, + ConnStatus: StatusIdle, + ConnStatusUpdate: time.Now(), + }) + + state, err := rec.GetPeer(key) + assert.NoError(t, err) + assert.Equal(t, StatusIdle, state.ConnStatus, "after debounced close: status must be Idle") +} + +// Codex hardening: ConnectionTypeExtended derive must reflect the +// CURRENT live state, not transient debounce states. While peer is +// Connected via P2P and mgmt flips liveness false, the derived label +// should still report "P2P" -- the connection IS still working +// locally. Only when ConnStatus flips to Idle does the label clear. 
+func TestStatus_DeriveExtended_DuringLivenessFlap(t *testing.T) { + state := State{ + ConnStatus: StatusConnected, + Relayed: false, + RemoteLiveOnline: true, + RemoteServerLivenessKnown: true, + } + assert.Equal(t, "P2P", DeriveConnectionTypeExtended(state), "P2P with full live") + + // Liveness flips false (debounce in flight). + state.RemoteLiveOnline = false + assert.Equal(t, "P2P", DeriveConnectionTypeExtended(state), "P2P remains during liveness flip — local conn still works") + + // Eventually conn closes → ConnStatus Idle. + state.ConnStatus = StatusIdle + assert.Equal(t, "", DeriveConnectionTypeExtended(state), "Idle clears the label") +} + +// Status proto round-trip: new effective + ICE-backoff fields must +// survive ToProto without loss. +func TestStatus_GetFullStatus_PreservesEffectiveAndBackoffFields(t *testing.T) { + key := "p1" + rec := NewRecorder("https://mgm") + _ = rec.AddPeer(key, "p1.example", "10.10.10.10") + + _ = rec.UpdatePeerICEState(State{ + PubKey: key, + ConnStatus: StatusConnected, + ConnStatusUpdate: time.Now(), + Relayed: true, + }) + _ = rec.UpdatePeerRemoteMeta(key, RemoteMeta{ + EffectiveConnectionMode: "p2p-dynamic", + EffectiveRelayTimeoutSecs: 86400, + EffectiveP2PTimeoutSecs: 10800, + EffectiveP2PRetryMaxSecs: 900, + }) + + full := rec.GetFullStatus() + assert.Len(t, full.Peers, 1) + p := full.Peers[0] + assert.Equal(t, "p2p-dynamic", p.RemoteEffectiveConnectionMode) + assert.Equal(t, uint32(86400), p.RemoteEffectiveRelayTimeoutSecs) + assert.Equal(t, uint32(10800), p.RemoteEffectiveP2PTimeoutSecs) + assert.Equal(t, uint32(900), p.RemoteEffectiveP2PRetryMaxSecs) +} diff --git a/client/internal/peer/status_remote_meta_notify_test.go b/client/internal/peer/status_remote_meta_notify_test.go new file mode 100644 index 00000000000..21dc60d2d50 --- /dev/null +++ b/client/internal/peer/status_remote_meta_notify_test.go @@ -0,0 +1,168 @@ +package peer + +import ( + "sync/atomic" + "testing" + "time" + + 
"github.com/stretchr/testify/assert" +) + +// recordingListener captures OnPeersListChanged calls for assertion. +// Other Listener methods are no-ops because UpdatePeerRemoteMeta only +// triggers the peer-list path. +type recordingListener struct { + peersChangedCount atomic.Int32 +} + +func (r *recordingListener) OnConnected() {} +func (r *recordingListener) OnDisconnected() {} +func (r *recordingListener) OnConnecting() {} +func (r *recordingListener) OnDisconnecting() {} +func (r *recordingListener) OnAddressChanged(string, string) {} +func (r *recordingListener) OnPeersListChanged(int) { + r.peersChangedCount.Add(1) +} + +func waitForCount(t *testing.T, l *recordingListener, minCount int32, label string) { + t.Helper() + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if l.peersChangedCount.Load() >= minCount { + return + } + time.Sleep(10 * time.Millisecond) + } + t.Fatalf("%s: timed out waiting for OnPeersListChanged count >= %d (got %d)", + label, minCount, l.peersChangedCount.Load()) +} + +// Codex finding 3: UpdatePeerRemoteMeta must fire OnPeersListChanged +// when a UI-relevant field flips (LiveOnline, ServerLivenessKnown, +// EffectiveConnectionMode), so the Android peer-list refreshes +// immediately instead of at the next 30 s daemon-RPC poll. + +func TestStatus_UpdatePeerRemoteMeta_LiveOnlineFlipNotifies(t *testing.T) { + rec := NewRecorder("https://mgm") + listener := &recordingListener{} + rec.SetConnectionListener(listener) + if err := rec.AddPeer("peerA", "fqdn", "10.0.0.1"); err != nil { + t.Fatalf("AddPeer: %v", err) + } + // setListener fires an initial OnPeersListChanged; wait for it. + waitForCount(t, listener, 1, "initial setListener") + + // Baseline RemoteMeta with liveness=true, no notification expected + // because the freshly-added peer's RemoteLiveOnline default is false + // vs. true → that's a flip on the FIRST update too. 
Reset counter + // after baseline so the rest of the test only counts flips. + if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + LiveOnline: true, ServerLivenessKnown: true, + }); err != nil { + t.Fatalf("baseline UpdatePeerRemoteMeta: %v", err) + } + waitForCount(t, listener, 2, "first flip from default") + listener.peersChangedCount.Store(0) + + // Repeat the SAME meta — must NOT notify (no flip). + if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + LiveOnline: true, ServerLivenessKnown: true, + }); err != nil { + t.Fatalf("idempotent UpdatePeerRemoteMeta: %v", err) + } + time.Sleep(50 * time.Millisecond) + assert.Equal(t, int32(0), listener.peersChangedCount.Load(), + "identical meta must not fire notification") + + // Flip true → false: must notify. + if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + LiveOnline: false, ServerLivenessKnown: true, + }); err != nil { + t.Fatalf("flip true->false UpdatePeerRemoteMeta: %v", err) + } + waitForCount(t, listener, 1, "true->false flip") + listener.peersChangedCount.Store(0) + + // Flip back false → true: must notify. + if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + LiveOnline: true, ServerLivenessKnown: true, + }); err != nil { + t.Fatalf("flip false->true UpdatePeerRemoteMeta: %v", err) + } + waitForCount(t, listener, 1, "false->true flip") +} + +func TestStatus_UpdatePeerRemoteMeta_EffectiveModeChangeNotifies(t *testing.T) { + rec := NewRecorder("https://mgm") + listener := &recordingListener{} + rec.SetConnectionListener(listener) + if err := rec.AddPeer("peerA", "fqdn", "10.0.0.1"); err != nil { + t.Fatalf("AddPeer: %v", err) + } + waitForCount(t, listener, 1, "initial setListener") + + // Baseline with mode=p2p-dynamic (flip from "" — counted). 
+ if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + EffectiveConnectionMode: "p2p-dynamic", LiveOnline: true, ServerLivenessKnown: true, + }); err != nil { + t.Fatalf("baseline: %v", err) + } + waitForCount(t, listener, 2, "baseline flip from empty mode") + listener.peersChangedCount.Store(0) + + // Same mode again — no notification. + if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + EffectiveConnectionMode: "p2p-dynamic", LiveOnline: true, ServerLivenessKnown: true, + }); err != nil { + t.Fatalf("idempotent: %v", err) + } + time.Sleep(50 * time.Millisecond) + assert.Equal(t, int32(0), listener.peersChangedCount.Load(), + "same mode must not fire notification") + + // Mode flip p2p-dynamic → relay-forced — must notify. + if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + EffectiveConnectionMode: "relay-forced", LiveOnline: true, ServerLivenessKnown: true, + }); err != nil { + t.Fatalf("mode flip: %v", err) + } + waitForCount(t, listener, 1, "mode flip") +} + +// Non-material fields (timeout values, groups, last-seen) MUST NOT fire +// the notification even when they change — they ride the next regular +// 30 s poll and don't need an immediate UI redraw. +func TestStatus_UpdatePeerRemoteMeta_NonMaterialFieldsDoNotNotify(t *testing.T) { + rec := NewRecorder("https://mgm") + listener := &recordingListener{} + rec.SetConnectionListener(listener) + if err := rec.AddPeer("peerA", "fqdn", "10.0.0.1"); err != nil { + t.Fatalf("AddPeer: %v", err) + } + waitForCount(t, listener, 1, "initial setListener") + + // Baseline. + if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + LiveOnline: true, ServerLivenessKnown: true, + EffectiveRelayTimeoutSecs: 60, + Groups: []string{"g1"}, + LastSeenAtServer: time.Now(), + }); err != nil { + t.Fatalf("baseline: %v", err) + } + waitForCount(t, listener, 2, "baseline") + listener.peersChangedCount.Store(0) + + // Change only non-material fields — no notification expected. 
+ if err := rec.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + LiveOnline: true, ServerLivenessKnown: true, + EffectiveRelayTimeoutSecs: 90, + Groups: []string{"g1", "g2"}, + LastSeenAtServer: time.Now(), + }); err != nil { + t.Fatalf("non-material change: %v", err) + } + time.Sleep(80 * time.Millisecond) + assert.Equal(t, int32(0), listener.peersChangedCount.Load(), + "non-material field changes must not fire notification") +} diff --git a/client/internal/peer/status_test.go b/client/internal/peer/status_test.go index 272638750ff..415dec35050 100644 --- a/client/internal/peer/status_test.go +++ b/client/internal/peer/status_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "sync" + "sync/atomic" "testing" "time" @@ -243,7 +244,120 @@ func TestGetFullStatus(t *testing.T) { fullStatus := status.GetFullStatus() + // GetFullStatus sets ServerOnline=true for peers in d.peers. + peerState1.ServerOnline = true + peerState2.ServerOnline = true + assert.Equal(t, managementState, fullStatus.ManagementState, "management status should be equal") assert.Equal(t, signalState, fullStatus.SignalState, "signal status should be equal") assert.ElementsMatch(t, []State{peerState1, peerState2}, fullStatus.Peers, "peers states should match") } + +// TestStatus_ConnStateListener_CalledAfterUnlock verifies that the +// connStateListener registered via SetConnStateListener is invoked AFTER +// d.mux is released (Extract-Method guarantee). Phase 3.7i of #5989. +func TestStatus_ConnStateListener_CalledAfterUnlock(t *testing.T) { + d := NewRecorder("") + var listenerCalled atomic.Bool + var listenerObservedLockHeld atomic.Bool + + d.SetConnStateListener(func(_ string, _ State) { + // Try TryLock — if the locked body still holds mux this returns + // false. We record the result so the assertion below can report it. 
+ if d.mux.TryLock() { + listenerObservedLockHeld.Store(false) + d.mux.Unlock() + } else { + listenerObservedLockHeld.Store(true) + } + listenerCalled.Store(true) + }) + + if err := d.AddPeer("peerA", "fqdn-A", "100.64.0.1"); err != nil { + t.Fatal(err) + } + // Trigger a ConnStatus transition (Idle -> Connected) which must fire + // the listener through updatePeerStateLocked. + if err := d.UpdatePeerState(State{ + PubKey: "peerA", + ConnStatus: StatusConnected, + ConnStatusUpdate: time.Now(), + }); err != nil { + t.Fatal(err) + } + + if !listenerCalled.Load() { + t.Error("listener not invoked") + } + if listenerObservedLockHeld.Load() { + t.Error("listener called while mux still held — Extract-Method refactor incomplete") + } +} + +// TestStatus_UpdatePeerRemoteMeta_PreservesConnStatus verifies that +// UpdatePeerRemoteMeta sets Remote* fields without touching ConnStatus. +// Phase 3.7i of #5989. +func TestStatus_UpdatePeerRemoteMeta_PreservesConnStatus(t *testing.T) { + d := NewRecorder("") + // Add a peer first so it exists in d.peers (the map). + if err := d.AddPeer("peerA", "fqdnA", "100.64.0.2"); err != nil { + t.Fatal(err) + } + // Set its ConnStatus to Connected so we can verify it is preserved. 
+ if err := d.UpdatePeerState(State{ + PubKey: "peerA", + ConnStatus: StatusConnected, + Relayed: false, + }); err != nil { + t.Fatal(err) + } + + if err := d.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + EffectiveConnectionMode: "p2p-dynamic", + Groups: []string{"router"}, + }); err != nil { + t.Fatal(err) + } + + d.mux.Lock() + got := d.peers["peerA"] + d.mux.Unlock() + if got.ConnStatus != StatusConnected { + t.Errorf("ConnStatus changed: %v", got.ConnStatus) + } + if got.RemoteEffectiveConnectionMode != "p2p-dynamic" { + t.Errorf("EffectiveMode not set: %s", got.RemoteEffectiveConnectionMode) + } + if len(got.RemoteGroups) != 1 || got.RemoteGroups[0] != "router" { + t.Errorf("Groups not set: %v", got.RemoteGroups) + } +} + +// TestStatus_GetFullStatus_SetsServerOnlineAndCounters verifies aggregate +// counters and ServerOnline flag set in GetFullStatus. Phase 3.7i of #5989. +func TestStatus_GetFullStatus_SetsServerOnlineAndCounters(t *testing.T) { + d := NewRecorder("") + d.mux.Lock() + d.peers["a"] = State{PubKey: "a", ConnStatus: StatusConnected, Relayed: false} + d.peers["b"] = State{PubKey: "b", ConnStatus: StatusConnected, Relayed: true} + d.peers["c"] = State{PubKey: "c", ConnStatus: StatusIdle} + d.offlinePeers = []State{{PubKey: "d"}} + d.mux.Unlock() + + fs := d.GetFullStatus() + if fs.P2PConnectedPeers != 1 || fs.RelayedConnectedPeers != 1 || + fs.IdleOnlinePeers != 1 || fs.ServerOfflinePeers != 1 || + fs.ConfiguredPeersTotal != 4 { + t.Errorf("counters wrong: P2P=%d Relayed=%d Idle=%d Offline=%d Total=%d", + fs.P2PConnectedPeers, fs.RelayedConnectedPeers, + fs.IdleOnlinePeers, fs.ServerOfflinePeers, fs.ConfiguredPeersTotal) + } + for _, st := range fs.Peers { + if st.PubKey == "d" && st.ServerOnline { + t.Error("offline peer must have ServerOnline=false") + } + if st.PubKey != "d" && !st.ServerOnline { + t.Errorf("online peer %s must have ServerOnline=true", st.PubKey) + } + } +} diff --git a/client/internal/peer/worker_ice.go 
b/client/internal/peer/worker_ice.go index 29bf5aaaa74..f4c881c87cc 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -101,6 +101,20 @@ func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) { defer w.muxAgent.Unlock() if w.agent != nil || w.agentConnecting { + // Phase 3.7c (#5989) re-introduces the Guard-Loop Fix from PR #5805. + // While the local ICE agent is mid-connection, ignore any incoming + // offer regardless of sessionID. Both sides' Guards fire fresh + // offers every ~800ms-30s (driven by their own iceRetryState + + // srReconnect events). If we tear down on every sessionID-change, + // the in-flight ICE pair-checks (~5-10s) never complete -- the + // remote's freshly-recreated agent generates yet another sessionID, + // loops back, infinite recreate cycle. Empirically observed on + // badmitterndorf during LTE-carrier instability: 5 different + // sessionIDs received from the remote in 2min, no P2P convergence. + if w.agentConnecting { + w.log.Debugf("agent connecting, skipping new offer (sessionID %s) to let pair-checks finish", remoteOfferAnswer.SessionIDString()) + return + } // backward compatibility with old clients that do not send session ID if remoteOfferAnswer.SessionID == nil { w.log.Debugf("agent already exists, skipping the offer") @@ -201,6 +215,21 @@ func (w *WorkerICE) InProgress() bool { return w.agentConnecting } +// IsConnected returns true when pion's ICE agent reports Connected and +// has not yet transitioned to Disconnected/Failed/Closed. Used by +// Conn.onNetworkChange (Phase 3.7g of #5989) to skip a needless +// workerICE.Close when an srReconnect/network-change event arrives but +// the existing P2P session is still alive end-to-end (typical for a +// brief signal-server outage while peer-to-peer UDP keeps flowing). 
+// Closing the agent in that case forces a 15-25 s renegotiation cycle +// and a Relay→ICE handover gap that the user would observe as a ping +// dropout, even though no real peer-to-peer connectivity loss occurred. +func (w *WorkerICE) IsConnected() bool { + w.muxAgent.Lock() + defer w.muxAgent.Unlock() + return w.agent != nil && w.lastKnownState == ice.ConnectionStateConnected +} + func (w *WorkerICE) Close() { w.muxAgent.Lock() defer w.muxAgent.Unlock() @@ -520,6 +549,8 @@ func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dia case ice.ConnectionStateConnected: w.lastKnownState = ice.ConnectionStateConnected w.logSuccessfulPaths(agent) + // Phase 3 of #5989: reset backoff on ICE success. + w.conn.onICEConnected() return case ice.ConnectionStateFailed, ice.ConnectionStateDisconnected, ice.ConnectionStateClosed: // ice.ConnectionStateClosed happens when we recreate the agent. For the P2P to TURN switch important to @@ -531,6 +562,13 @@ func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dia w.lastKnownState = ice.ConnectionStateDisconnected w.conn.onICEStateDisconnected(sessionChanged) } + + // Phase 3 of #5989: record failure in backoff only for true + // ICE failure (not for the synthetic Closed event that occurs + // when we recreate the agent on reconnect). + if state == ice.ConnectionStateFailed { + w.conn.onICEFailed() + } default: return } diff --git a/client/internal/peerstore/store.go b/client/internal/peerstore/store.go index 099fe4528b0..12d9ed73eb6 100644 --- a/client/internal/peerstore/store.go +++ b/client/internal/peerstore/store.go @@ -95,6 +95,12 @@ func (s *Store) PeerConnOpen(ctx context.Context, pubKey string) { } +// PeerConnIdle is invoked by the lazy-manager when a peer's idle +// timer expires (relay-inactivity in p2p-lazy / p2p-dynamic). 
The +// connection is suspended but the WG peer entry stays so any +// route-manager-applied AllowedIPs (advertised subnets) survive the +// wake/sleep cycle. See docs/bugs/2026-05-04-lazy-wake-on-routed- +// subnet.md. func (s *Store) PeerConnIdle(pubKey string) { s.peerConnsMu.RLock() defer s.peerConnsMu.RUnlock() @@ -103,9 +109,12 @@ func (s *Store) PeerConnIdle(pubKey string) { if !ok { return } - p.Close(true) + p.Close(true, true) } +// PeerConnClose is invoked by the lazy-manager when a peer must be +// closed without notifying the remote side (e.g. excluded from lazy on +// re-evaluation). Same lazy-suspend semantics: keep the WG peer entry. func (s *Store) PeerConnClose(pubKey string) { s.peerConnsMu.RLock() defer s.peerConnsMu.RUnlock() @@ -114,7 +123,7 @@ func (s *Store) PeerConnClose(pubKey string) { if !ok { return } - p.Close(false) + p.Close(false, true) } func (s *Store) PeersPubKey() []string { diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index 20c615d579d..5abbdae8825 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -96,6 +96,11 @@ type ConfigInput struct { LazyConnectionEnabled *bool + ConnectionMode *string + RelayTimeoutSeconds *uint32 + P2pTimeoutSeconds *uint32 + P2pRetryMaxSeconds *uint32 + MTU *uint16 } @@ -170,6 +175,32 @@ type Config struct { LazyConnectionEnabled bool + ConnectionMode string `json:",omitempty"` + + // RelayTimeoutSeconds, P2pTimeoutSeconds and P2pRetryMaxSeconds + // are the local profile-config overrides for the Phase-3.7i + // connection-mode timeouts. The non-pointer uint32 representation + // means we cannot encode "explicit 0 (= disable)" locally: + // + // value > 0 -> use this value (overrides any server-pushed value) + // value == 0 -> "no local override" -> fall through to the + // server-pushed value, then to the daemon's + // built-in default if the server has none either. 
+ // NOT the same as the server-side "0 = disable" + // sentinel (see types.Settings.P2pRetryMaxSeconds + // docstring); for backoff specifically, 0 always + // means "follow server" when configured locally. + // + // If you need to explicitly disable backoff on a single peer + // regardless of server settings, set it via the dashboard at the + // account level instead. The (*uint32) ConfigInput variant DOES + // distinguish nil from 0, but ApplyInput collapses both back to + // uint32(0) here -- a structural fix would require changing every + // caller of cfg.XxxSeconds across the codebase. + RelayTimeoutSeconds uint32 `json:",omitempty"` + P2pTimeoutSeconds uint32 `json:",omitempty"` + P2pRetryMaxSeconds uint32 `json:"p2p_retry_max_seconds,omitempty"` + MTU uint16 } @@ -593,6 +624,27 @@ func (config *Config) apply(input ConfigInput) (updated bool, err error) { updated = true } + if input.ConnectionMode != nil && *input.ConnectionMode != config.ConnectionMode { + log.Infof("switching connection mode to %s", *input.ConnectionMode) + config.ConnectionMode = *input.ConnectionMode + updated = true + } + if input.RelayTimeoutSeconds != nil && *input.RelayTimeoutSeconds != config.RelayTimeoutSeconds { + log.Infof("switching relay timeout to %d seconds", *input.RelayTimeoutSeconds) + config.RelayTimeoutSeconds = *input.RelayTimeoutSeconds + updated = true + } + if input.P2pTimeoutSeconds != nil && *input.P2pTimeoutSeconds != config.P2pTimeoutSeconds { + log.Infof("switching p2p timeout to %d seconds", *input.P2pTimeoutSeconds) + config.P2pTimeoutSeconds = *input.P2pTimeoutSeconds + updated = true + } + if input.P2pRetryMaxSeconds != nil && *input.P2pRetryMaxSeconds != config.P2pRetryMaxSeconds { + log.Infof("switching p2p retry max to %d seconds", *input.P2pRetryMaxSeconds) + config.P2pRetryMaxSeconds = *input.P2pRetryMaxSeconds + updated = true + } + if input.MTU != nil && *input.MTU != config.MTU { log.Infof("updating MTU to %d (old value %d)", *input.MTU, config.MTU) 
config.MTU = *input.MTU diff --git a/client/internal/stdnet/filter.go b/client/internal/stdnet/filter.go index e457140018f..f8cc8bfaeb2 100644 --- a/client/internal/stdnet/filter.go +++ b/client/internal/stdnet/filter.go @@ -8,19 +8,72 @@ import ( "golang.zx2c4.com/wireguard/wgctrl" ) +// windowsKnownBadSubstrings lists Windows interface-name fragments that +// should ALWAYS be excluded from ICE candidate gathering, even when the +// caller-supplied disallow list does not cover them. These are the +// interfaces uray-mic-d4's debug bundle (2026-05-04) showed Pion ICE +// picking as host candidates -- producing dead-end pairs because none +// of these can be reached from the public internet: +// +// - "loopback pseudo-interface" -> 127.0.0.1 (loopback) +// - "vethernet (default switch)" -> 172.26.x.x (Hyper-V NAT-only) +// - "vethernet (wsl" -> WSL2 host-only +// +// Matched as case-insensitive substrings. +// +// IMPORTANT: User-named Hyper-V external switches like "vEthernet (LAN)" +// MUST NOT be filtered. On uray-mic-d4 that interface IS the default +// route (192.168.0.243/22 -> 0.0.0.0/0 via 192.168.0.254). Filtering it +// out would actually break P2P, not improve it. +var windowsKnownBadSubstrings = []string{ + "loopback pseudo-interface", + "vethernet (default switch)", + "vethernet (wsl", +} + // InterfaceFilter is a function passed to ICE Agent to filter out not allowed interfaces // to avoid building tunnel over them. +// +// Matching is case-insensitive because Windows interface names use mixed +// case (e.g. "Loopback Pseudo-Interface 1") while the disallow list is +// lowercase. Without the fold, the historic implementation let every +// Windows interface slip past and Pion ICE picked junk addresses +// (127.0.0.1, 172.26.x.x Hyper-V Default Switch, internal-VPN /22s) as +// local host candidates, dooming P2P to dead-end pairs and forcing +// relay-only. See windowsKnownBadSubstrings for the targeted Windows +// extras. 
+// +// Reported by Michael Uray on uray-mic-d4 (2026-05-04): 0/28 peers P2P. func InterfaceFilter(disallowList []string) func(string) bool { - return func(iFace string) bool { + lowerIFace := strings.ToLower(iFace) - if strings.HasPrefix(iFace, "lo") { - // hardcoded loopback check to support already installed agents + // Linux/macOS loopback prefix ("lo", "lo0"). + if strings.HasPrefix(lowerIFace, "lo") { return false } + // Windows-specific known-bad substrings (loopback, NAT switches). + if runtime.GOOS == "windows" { + for _, sub := range windowsKnownBadSubstrings { + if strings.Contains(lowerIFace, sub) { + return false + } + } + } + for _, s := range disallowList { - if strings.HasPrefix(iFace, s) && runtime.GOOS != "ios" { + sLower := strings.ToLower(s) + // "veth" exists on both Linux (legitimate veth pair to filter) + // and Windows (where every Hyper-V iface starts with vEthernet, + // including the user's REAL default-route external switch). On + // Windows, junk Hyper-V interfaces are filtered above by name; + // applying a blanket vEthernet* prefix here would also drop + // user-named external switches like "vEthernet (LAN)". + if sLower == "veth" && runtime.GOOS == "windows" { + continue + } + if strings.HasPrefix(lowerIFace, sLower) && runtime.GOOS != "ios" { return false } } diff --git a/client/internal/stdnet/filter_test.go b/client/internal/stdnet/filter_test.go new file mode 100644 index 00000000000..8a9fa84ab6d --- /dev/null +++ b/client/internal/stdnet/filter_test.go @@ -0,0 +1,80 @@ +package stdnet + +import ( + "runtime" + "testing" +) + +// Regression test for the Windows-side ICE interface filter. +// +// Two things this test pins down: +// +// 1. Loopback / Hyper-V Default Switch / WSL adapter must be excluded +// even though their Windows names ("Loopback Pseudo-Interface 1", +// "vEthernet (Default Switch)") don't share a lowercase prefix with +// anything in the default disallow list. +// +// 2. 
 User-named Hyper-V external switches (e.g. "vEthernet (LAN)") +// MUST stay allowed. On uray-mic-d4 (Michael Uray's debug bundle +// 2026-05-04) that interface owns the default route at +// 192.168.0.243/22 -> 0.0.0.0/0; filtering it out would have +// made P2P worse, not better. Codex review caught the broad +// "veth"-prefix variant of this fix before it shipped. +func TestInterfaceFilter_Windows_TargetedFiltering(t *testing.T) { + disallow := []string{"wt", "wg", "veth", "br-", "lo", "docker"} + allow := InterfaceFilter(disallow) + + cases := []struct { + name string + want bool // true => allowed, false => filtered out + }{ + // Always-bad Windows interfaces: filtered. + {"Loopback Pseudo-Interface 1", false}, + {"vEthernet (Default Switch)", false}, + {"vEthernet (WSL)", false}, + {"vEthernet (WSL (Hyper-V firewall))", false}, + // Disallow-list tokens (any platform). + {"wt0", false}, + // Linux names (lowercase) still filtered: + {"lo", false}, + + // Real candidate interfaces stay allowed. + {"Ethernet USB", true}, + {"OpenVPN 1", true}, + {"WiFi", true}, + // Critical: user-named Hyper-V external switch is the actual + // default-route interface and must NOT be dropped. + {"vEthernet (LAN)", true}, + {"vEthernet (External)", true}, + } + + for _, c := range cases { + // The wgctrl branch can override on hosts where NetBird is + // running; tests run on a host where these names are not + // real interfaces, so the final return faithfully reflects + // the disallow-list logic. + got := allow(c.name) + // "veth*" prefix filters on non-Windows only; the matching is + // case-insensitive, so on Linux test runners "vEthernet (LAN)" + // IS filtered by the "veth" prefix — which is why the + // want-true assertion below is gated to Windows.
+ if !c.want && got { + t.Errorf("InterfaceFilter(%q) = true, want false (should be filtered)", c.name) + } + if c.want && !got && runtime.GOOS == "windows" && c.name == "vEthernet (LAN)" { + t.Fatalf("InterfaceFilter(%q) = false, want true on Windows (this is uray-mic-d4's default-route interface)", c.name) + } + } +} + +// Linux-side regression: keep filtering legitimate Linux veth pairs. +func TestInterfaceFilter_Linux_VethPair(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("veth prefix filter is intentionally skipped on Windows") + } + allow := InterfaceFilter([]string{"veth", "docker", "lo"}) + for _, name := range []string{"veth0", "veth1234", "docker0", "lo"} { + if allow(name) { + t.Errorf("InterfaceFilter(%q) = true, want false on Linux", name) + } + } +} diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 11e7877f2df..8740f9a6027 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -342,8 +342,18 @@ type LoginRequest struct { EnableSSHRemotePortForwarding *bool `protobuf:"varint,37,opt,name=enableSSHRemotePortForwarding,proto3,oneof" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth *bool `protobuf:"varint,38,opt,name=disableSSHAuth,proto3,oneof" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL *int32 `protobuf:"varint,39,opt,name=sshJWTCacheTTL,proto3,oneof" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". 
+ ConnectionMode *string `protobuf:"bytes,40,opt,name=connection_mode,json=connectionMode,proto3,oneof" json:"connection_mode,omitempty"` + P2PTimeoutSeconds *uint32 `protobuf:"varint,41,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3,oneof" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds *uint32 `protobuf:"varint,42,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3,oneof" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + P2PRetryMaxSeconds *uint32 `protobuf:"varint,43,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3,oneof" json:"p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *LoginRequest) Reset() { @@ -650,6 +660,34 @@ func (x *LoginRequest) GetSshJWTCacheTTL() int32 { return 0 } +func (x *LoginRequest) GetConnectionMode() string { + if x != nil && x.ConnectionMode != nil { + return *x.ConnectionMode + } + return "" +} + +func (x *LoginRequest) GetP2PTimeoutSeconds() uint32 { + if x != nil && x.P2PTimeoutSeconds != nil { + return *x.P2PTimeoutSeconds + } + return 0 +} + +func (x *LoginRequest) GetRelayTimeoutSeconds() uint32 { + if x != nil && x.RelayTimeoutSeconds != nil { + return *x.RelayTimeoutSeconds + } + return 0 +} + +func (x *LoginRequest) GetP2PRetryMaxSeconds() uint32 { + if x != nil && x.P2PRetryMaxSeconds != nil { + return *x.P2PRetryMaxSeconds + } + return 0 +} + type LoginResponse struct { state protoimpl.MessageState `protogen:"open.v1"` NeedsSSOLogin bool `protobuf:"varint,1,opt,name=needsSSOLogin,proto3" json:"needsSSOLogin,omitempty"` @@ -1182,8 +1220,28 @@ type GetConfigResponse struct { EnableSSHRemotePortForwarding bool `protobuf:"varint,23,opt,name=enableSSHRemotePortForwarding,proto3" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth bool `protobuf:"varint,25,opt,name=disableSSHAuth,proto3" 
json:"disableSSHAuth,omitempty"` SshJWTCacheTTL int32 `protobuf:"varint,26,opt,name=sshJWTCacheTTL,proto3" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is the canonical lower-kebab-case name + // (relay-forced, p2p, p2p-lazy, p2p-dynamic, follow-server) or + // empty string when no local override is set. + ConnectionMode string `protobuf:"bytes,27,opt,name=connection_mode,json=connectionMode,proto3" json:"connection_mode,omitempty"` + P2PTimeoutSeconds uint32 `protobuf:"varint,28,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds uint32 `protobuf:"varint,29,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = no + // local override (daemon falls back to server-pushed value or built-in + // 15-min default). + P2PRetryMaxSeconds uint32 `protobuf:"varint,30,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3" json:"p2p_retry_max_seconds,omitempty"` + // Phase 3.7h: values most recently pushed by the management server's + // PeerConfig (independent of any local override). The UI surfaces + // these so users can see what "Follow server" would inherit and which + // numeric defaults the empty-entry overrides fall back to. All four + // are 0/empty when the engine has not received PeerConfig yet. 
+ ServerPushedConnectionMode string `protobuf:"bytes,31,opt,name=server_pushed_connection_mode,json=serverPushedConnectionMode,proto3" json:"server_pushed_connection_mode,omitempty"` + ServerPushedRelayTimeoutSeconds uint32 `protobuf:"varint,32,opt,name=server_pushed_relay_timeout_seconds,json=serverPushedRelayTimeoutSeconds,proto3" json:"server_pushed_relay_timeout_seconds,omitempty"` + ServerPushedP2PTimeoutSeconds uint32 `protobuf:"varint,33,opt,name=server_pushed_p2p_timeout_seconds,json=serverPushedP2pTimeoutSeconds,proto3" json:"server_pushed_p2p_timeout_seconds,omitempty"` + ServerPushedP2PRetryMaxSeconds uint32 `protobuf:"varint,34,opt,name=server_pushed_p2p_retry_max_seconds,json=serverPushedP2pRetryMaxSeconds,proto3" json:"server_pushed_p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetConfigResponse) Reset() { @@ -1398,6 +1456,62 @@ func (x *GetConfigResponse) GetSshJWTCacheTTL() int32 { return 0 } +func (x *GetConfigResponse) GetConnectionMode() string { + if x != nil { + return x.ConnectionMode + } + return "" +} + +func (x *GetConfigResponse) GetP2PTimeoutSeconds() uint32 { + if x != nil { + return x.P2PTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetRelayTimeoutSeconds() uint32 { + if x != nil { + return x.RelayTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.P2PRetryMaxSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetServerPushedConnectionMode() string { + if x != nil { + return x.ServerPushedConnectionMode + } + return "" +} + +func (x *GetConfigResponse) GetServerPushedRelayTimeoutSeconds() uint32 { + if x != nil { + return x.ServerPushedRelayTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetServerPushedP2PTimeoutSeconds() uint32 { + if x != nil { + return x.ServerPushedP2PTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) 
GetServerPushedP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.ServerPushedP2PRetryMaxSeconds + } + return 0 +} + // PeerState contains the latest state of a peer type PeerState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1419,8 +1533,37 @@ type PeerState struct { Latency *durationpb.Duration `protobuf:"bytes,17,opt,name=latency,proto3" json:"latency,omitempty"` RelayAddress string `protobuf:"bytes,18,opt,name=relayAddress,proto3" json:"relayAddress,omitempty"` SshHostKey []byte `protobuf:"bytes,19,opt,name=sshHostKey,proto3" json:"sshHostKey,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int32 `protobuf:"varint,20,opt,name=iceBackoffFailures,proto3" json:"iceBackoffFailures,omitempty"` + IceBackoffNextRetry *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=iceBackoffNextRetry,proto3" json:"iceBackoffNextRetry,omitempty"` + IceBackoffSuspended bool `protobuf:"varint,22,opt,name=iceBackoffSuspended,proto3" json:"iceBackoffSuspended,omitempty"` + // Phase 3.7i (#5989): per-peer enrichment from RemotePeerConfig. 
+ ServerOnline bool `protobuf:"varint,30,opt,name=server_online,json=serverOnline,proto3" json:"server_online,omitempty"` + LastSeenAtServer *timestamppb.Timestamp `protobuf:"bytes,31,opt,name=last_seen_at_server,json=lastSeenAtServer,proto3" json:"last_seen_at_server,omitempty"` + Groups []string `protobuf:"bytes,32,rep,name=groups,proto3" json:"groups,omitempty"` + EffectiveConnectionMode string `protobuf:"bytes,33,opt,name=effective_connection_mode,json=effectiveConnectionMode,proto3" json:"effective_connection_mode,omitempty"` + ConfiguredConnectionMode string `protobuf:"bytes,34,opt,name=configured_connection_mode,json=configuredConnectionMode,proto3" json:"configured_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `protobuf:"varint,35,opt,name=effective_relay_timeout_secs,json=effectiveRelayTimeoutSecs,proto3" json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `protobuf:"varint,36,opt,name=effective_p2p_timeout_secs,json=effectiveP2pTimeoutSecs,proto3" json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `protobuf:"varint,37,opt,name=effective_p2p_retry_max_secs,json=effectiveP2pRetryMaxSecs,proto3" json:"effective_p2p_retry_max_secs,omitempty"` + ConfiguredRelayTimeoutSecs uint32 `protobuf:"varint,38,opt,name=configured_relay_timeout_secs,json=configuredRelayTimeoutSecs,proto3" json:"configured_relay_timeout_secs,omitempty"` + ConfiguredP2PTimeoutSecs uint32 `protobuf:"varint,39,opt,name=configured_p2p_timeout_secs,json=configuredP2pTimeoutSecs,proto3" json:"configured_p2p_timeout_secs,omitempty"` + ConfiguredP2PRetryMaxSecs uint32 `protobuf:"varint,40,opt,name=configured_p2p_retry_max_secs,json=configuredP2pRetryMaxSecs,proto3" json:"configured_p2p_retry_max_secs,omitempty"` + // Phase 3.7i (#5989): UI-friendly transient connection-type label. 
+ // Values: + // + // "" -- not connected (Idle/Connecting) + // "P2P" -- direct ICE path active + // "Relayed" -- relay path active, ICE failed/backoff + // "Relayed (negotiating P2P)" -- relay path active, ICE still trying + // (transient ~1-2s window after wakeup) + // + // UIs should display this verbatim instead of computing from + // (relayed, ice_*) themselves -- keeps the transition window + // visualization consistent across all clients. + ConnectionTypeExtended string `protobuf:"bytes,41,opt,name=connection_type_extended,json=connectionTypeExtended,proto3" json:"connection_type_extended,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PeerState) Reset() { @@ -1579,6 +1722,111 @@ func (x *PeerState) GetSshHostKey() []byte { return nil } +func (x *PeerState) GetIceBackoffFailures() int32 { + if x != nil { + return x.IceBackoffFailures + } + return 0 +} + +func (x *PeerState) GetIceBackoffNextRetry() *timestamppb.Timestamp { + if x != nil { + return x.IceBackoffNextRetry + } + return nil +} + +func (x *PeerState) GetIceBackoffSuspended() bool { + if x != nil { + return x.IceBackoffSuspended + } + return false +} + +func (x *PeerState) GetServerOnline() bool { + if x != nil { + return x.ServerOnline + } + return false +} + +func (x *PeerState) GetLastSeenAtServer() *timestamppb.Timestamp { + if x != nil { + return x.LastSeenAtServer + } + return nil +} + +func (x *PeerState) GetGroups() []string { + if x != nil { + return x.Groups + } + return nil +} + +func (x *PeerState) GetEffectiveConnectionMode() string { + if x != nil { + return x.EffectiveConnectionMode + } + return "" +} + +func (x *PeerState) GetConfiguredConnectionMode() string { + if x != nil { + return x.ConfiguredConnectionMode + } + return "" +} + +func (x *PeerState) GetEffectiveRelayTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveRelayTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetEffectiveP2PTimeoutSecs() uint32 { + if x != 
nil { + return x.EffectiveP2PTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetEffectiveP2PRetryMaxSecs() uint32 { + if x != nil { + return x.EffectiveP2PRetryMaxSecs + } + return 0 +} + +func (x *PeerState) GetConfiguredRelayTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredRelayTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetConfiguredP2PTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredP2PTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetConfiguredP2PRetryMaxSecs() uint32 { + if x != nil { + return x.ConfiguredP2PRetryMaxSecs + } + return 0 +} + +func (x *PeerState) GetConnectionTypeExtended() string { + if x != nil { + return x.ConnectionTypeExtended + } + return "" +} + // LocalPeerState contains the latest state of the local peer type LocalPeerState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2066,8 +2314,15 @@ type FullStatus struct { Events []*SystemEvent `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` LazyConnectionEnabled bool `protobuf:"varint,9,opt,name=lazyConnectionEnabled,proto3" json:"lazyConnectionEnabled,omitempty"` SshServerState *SSHServerState `protobuf:"bytes,10,opt,name=sshServerState,proto3" json:"sshServerState,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 3.7i (#5989): aggregate counters so UIs don't re-derive them. 
+ ConfiguredPeersTotal uint32 `protobuf:"varint,50,opt,name=configured_peers_total,json=configuredPeersTotal,proto3" json:"configured_peers_total,omitempty"` + ServerOnlinePeers uint32 `protobuf:"varint,51,opt,name=server_online_peers,json=serverOnlinePeers,proto3" json:"server_online_peers,omitempty"` + P2PConnectedPeers uint32 `protobuf:"varint,52,opt,name=p2p_connected_peers,json=p2pConnectedPeers,proto3" json:"p2p_connected_peers,omitempty"` + RelayedConnectedPeers uint32 `protobuf:"varint,53,opt,name=relayed_connected_peers,json=relayedConnectedPeers,proto3" json:"relayed_connected_peers,omitempty"` + IdleOnlinePeers uint32 `protobuf:"varint,54,opt,name=idle_online_peers,json=idleOnlinePeers,proto3" json:"idle_online_peers,omitempty"` + ServerOfflinePeers uint32 `protobuf:"varint,55,opt,name=server_offline_peers,json=serverOfflinePeers,proto3" json:"server_offline_peers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FullStatus) Reset() { @@ -2170,6 +2425,48 @@ func (x *FullStatus) GetSshServerState() *SSHServerState { return nil } +func (x *FullStatus) GetConfiguredPeersTotal() uint32 { + if x != nil { + return x.ConfiguredPeersTotal + } + return 0 +} + +func (x *FullStatus) GetServerOnlinePeers() uint32 { + if x != nil { + return x.ServerOnlinePeers + } + return 0 +} + +func (x *FullStatus) GetP2PConnectedPeers() uint32 { + if x != nil { + return x.P2PConnectedPeers + } + return 0 +} + +func (x *FullStatus) GetRelayedConnectedPeers() uint32 { + if x != nil { + return x.RelayedConnectedPeers + } + return 0 +} + +func (x *FullStatus) GetIdleOnlinePeers() uint32 { + if x != nil { + return x.IdleOnlinePeers + } + return 0 +} + +func (x *FullStatus) GetServerOfflinePeers() uint32 { + if x != nil { + return x.ServerOfflinePeers + } + return 0 +} + // Networks type ListNetworksRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -4009,8 +4306,18 @@ type SetConfigRequest struct { 
EnableSSHRemotePortForwarding *bool `protobuf:"varint,32,opt,name=enableSSHRemotePortForwarding,proto3,oneof" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth *bool `protobuf:"varint,33,opt,name=disableSSHAuth,proto3,oneof" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL *int32 `protobuf:"varint,34,opt,name=sshJWTCacheTTL,proto3,oneof" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + ConnectionMode *string `protobuf:"bytes,40,opt,name=connection_mode,json=connectionMode,proto3,oneof" json:"connection_mode,omitempty"` + P2PTimeoutSeconds *uint32 `protobuf:"varint,41,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3,oneof" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds *uint32 `protobuf:"varint,42,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3,oneof" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). 
+ P2PRetryMaxSeconds *uint32 `protobuf:"varint,43,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3,oneof" json:"p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SetConfigRequest) Reset() { @@ -4281,6 +4588,34 @@ func (x *SetConfigRequest) GetSshJWTCacheTTL() int32 { return 0 } +func (x *SetConfigRequest) GetConnectionMode() string { + if x != nil && x.ConnectionMode != nil { + return *x.ConnectionMode + } + return "" +} + +func (x *SetConfigRequest) GetP2PTimeoutSeconds() uint32 { + if x != nil && x.P2PTimeoutSeconds != nil { + return *x.P2PTimeoutSeconds + } + return 0 +} + +func (x *SetConfigRequest) GetRelayTimeoutSeconds() uint32 { + if x != nil && x.RelayTimeoutSeconds != nil { + return *x.RelayTimeoutSeconds + } + return 0 +} + +func (x *SetConfigRequest) GetP2PRetryMaxSeconds() uint32 { + if x != nil && x.P2PRetryMaxSeconds != nil { + return *x.P2PRetryMaxSeconds + } + return 0 +} + type SetConfigResponse struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -6186,7 +6521,7 @@ var File_daemon_proto protoreflect.FileDescriptor const file_daemon_proto_rawDesc = "" + "\n" + "\fdaemon.proto\x12\x06daemon\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"\x0e\n" + - "\fEmptyRequest\"\xb6\x12\n" + + "\fEmptyRequest\"\xea\x14\n" + "\fLoginRequest\x12\x1a\n" + "\bsetupKey\x18\x01 \x01(\tR\bsetupKey\x12&\n" + "\fpreSharedKey\x18\x02 \x01(\tB\x02\x18\x01R\fpreSharedKey\x12$\n" + @@ -6230,7 +6565,11 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18$ \x01(\bH\x17R\x1cenableSSHLocalPortForwarding\x88\x01\x01\x12I\n" + "\x1denableSSHRemotePortForwarding\x18% \x01(\bH\x18R\x1denableSSHRemotePortForwarding\x88\x01\x01\x12+\n" + "\x0edisableSSHAuth\x18& \x01(\bH\x19R\x0edisableSSHAuth\x88\x01\x01\x12+\n" + - "\x0esshJWTCacheTTL\x18' 
\x01(\x05H\x1aR\x0esshJWTCacheTTL\x88\x01\x01B\x13\n" + + "\x0esshJWTCacheTTL\x18' \x01(\x05H\x1aR\x0esshJWTCacheTTL\x88\x01\x01\x12,\n" + + "\x0fconnection_mode\x18( \x01(\tH\x1bR\x0econnectionMode\x88\x01\x01\x123\n" + + "\x13p2p_timeout_seconds\x18) \x01(\rH\x1cR\x11p2pTimeoutSeconds\x88\x01\x01\x127\n" + + "\x15relay_timeout_seconds\x18* \x01(\rH\x1dR\x13relayTimeoutSeconds\x88\x01\x01\x126\n" + + "\x15p2p_retry_max_seconds\x18+ \x01(\rH\x1eR\x12p2pRetryMaxSeconds\x88\x01\x01B\x13\n" + "\x11_rosenpassEnabledB\x10\n" + "\x0e_interfaceNameB\x10\n" + "\x0e_wireguardPortB\x17\n" + @@ -6257,7 +6596,11 @@ const file_daemon_proto_rawDesc = "" + "\x1d_enableSSHLocalPortForwardingB \n" + "\x1e_enableSSHRemotePortForwardingB\x11\n" + "\x0f_disableSSHAuthB\x11\n" + - "\x0f_sshJWTCacheTTL\"\xb5\x01\n" + + "\x0f_sshJWTCacheTTLB\x12\n" + + "\x10_connection_modeB\x16\n" + + "\x14_p2p_timeout_secondsB\x18\n" + + "\x16_relay_timeout_secondsB\x18\n" + + "\x16_p2p_retry_max_seconds\"\xb5\x01\n" + "\rLoginResponse\x12$\n" + "\rneedsSSOLogin\x18\x01 \x01(\bR\rneedsSSOLogin\x12\x1a\n" + "\buserCode\x18\x02 \x01(\tR\buserCode\x12(\n" + @@ -6290,7 +6633,7 @@ const file_daemon_proto_rawDesc = "" + "\fDownResponse\"P\n" + "\x10GetConfigRequest\x12 \n" + "\vprofileName\x18\x01 \x01(\tR\vprofileName\x12\x1a\n" + - "\busername\x18\x02 \x01(\tR\busername\"\xdb\b\n" + + "\busername\x18\x02 \x01(\tR\busername\"\xc3\f\n" + "\x11GetConfigResponse\x12$\n" + "\rmanagementUrl\x18\x01 \x01(\tR\rmanagementUrl\x12\x1e\n" + "\n" + @@ -6321,7 +6664,15 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18\x16 \x01(\bR\x1cenableSSHLocalPortForwarding\x12D\n" + "\x1denableSSHRemotePortForwarding\x18\x17 \x01(\bR\x1denableSSHRemotePortForwarding\x12&\n" + "\x0edisableSSHAuth\x18\x19 \x01(\bR\x0edisableSSHAuth\x12&\n" + - "\x0esshJWTCacheTTL\x18\x1a \x01(\x05R\x0esshJWTCacheTTL\"\xfe\x05\n" + + "\x0esshJWTCacheTTL\x18\x1a \x01(\x05R\x0esshJWTCacheTTL\x12'\n" + + 
"\x0fconnection_mode\x18\x1b \x01(\tR\x0econnectionMode\x12.\n" + + "\x13p2p_timeout_seconds\x18\x1c \x01(\rR\x11p2pTimeoutSeconds\x122\n" + + "\x15relay_timeout_seconds\x18\x1d \x01(\rR\x13relayTimeoutSeconds\x121\n" + + "\x15p2p_retry_max_seconds\x18\x1e \x01(\rR\x12p2pRetryMaxSeconds\x12A\n" + + "\x1dserver_pushed_connection_mode\x18\x1f \x01(\tR\x1aserverPushedConnectionMode\x12L\n" + + "#server_pushed_relay_timeout_seconds\x18 \x01(\rR\x1fserverPushedRelayTimeoutSeconds\x12H\n" + + "!server_pushed_p2p_timeout_seconds\x18! \x01(\rR\x1dserverPushedP2pTimeoutSeconds\x12K\n" + + "#server_pushed_p2p_retry_max_seconds\x18\" \x01(\rR\x1eserverPushedP2pRetryMaxSeconds\"\xec\f\n" + "\tPeerState\x12\x0e\n" + "\x02IP\x18\x01 \x01(\tR\x02IP\x12\x16\n" + "\x06pubKey\x18\x02 \x01(\tR\x06pubKey\x12\x1e\n" + @@ -6345,7 +6696,22 @@ const file_daemon_proto_rawDesc = "" + "\frelayAddress\x18\x12 \x01(\tR\frelayAddress\x12\x1e\n" + "\n" + "sshHostKey\x18\x13 \x01(\fR\n" + - "sshHostKey\"\xf0\x01\n" + + "sshHostKey\x12.\n" + + "\x12iceBackoffFailures\x18\x14 \x01(\x05R\x12iceBackoffFailures\x12L\n" + + "\x13iceBackoffNextRetry\x18\x15 \x01(\v2\x1a.google.protobuf.TimestampR\x13iceBackoffNextRetry\x120\n" + + "\x13iceBackoffSuspended\x18\x16 \x01(\bR\x13iceBackoffSuspended\x12#\n" + + "\rserver_online\x18\x1e \x01(\bR\fserverOnline\x12I\n" + + "\x13last_seen_at_server\x18\x1f \x01(\v2\x1a.google.protobuf.TimestampR\x10lastSeenAtServer\x12\x16\n" + + "\x06groups\x18 \x03(\tR\x06groups\x12:\n" + + "\x19effective_connection_mode\x18! 
\x01(\tR\x17effectiveConnectionMode\x12<\n" + + "\x1aconfigured_connection_mode\x18\" \x01(\tR\x18configuredConnectionMode\x12?\n" + + "\x1ceffective_relay_timeout_secs\x18# \x01(\rR\x19effectiveRelayTimeoutSecs\x12;\n" + + "\x1aeffective_p2p_timeout_secs\x18$ \x01(\rR\x17effectiveP2pTimeoutSecs\x12>\n" + + "\x1ceffective_p2p_retry_max_secs\x18% \x01(\rR\x18effectiveP2pRetryMaxSecs\x12A\n" + + "\x1dconfigured_relay_timeout_secs\x18& \x01(\rR\x1aconfiguredRelayTimeoutSecs\x12=\n" + + "\x1bconfigured_p2p_timeout_secs\x18' \x01(\rR\x18configuredP2pTimeoutSecs\x12@\n" + + "\x1dconfigured_p2p_retry_max_secs\x18( \x01(\rR\x19configuredP2pRetryMaxSecs\x128\n" + + "\x18connection_type_extended\x18) \x01(\tR\x16connectionTypeExtended\"\xf0\x01\n" + "\x0eLocalPeerState\x12\x0e\n" + "\x02IP\x18\x01 \x01(\tR\x02IP\x12\x16\n" + "\x06pubKey\x18\x02 \x01(\tR\x06pubKey\x12(\n" + @@ -6380,7 +6746,7 @@ const file_daemon_proto_rawDesc = "" + "\fportForwards\x18\x05 \x03(\tR\fportForwards\"^\n" + "\x0eSSHServerState\x12\x18\n" + "\aenabled\x18\x01 \x01(\bR\aenabled\x122\n" + - "\bsessions\x18\x02 \x03(\v2\x16.daemon.SSHSessionInfoR\bsessions\"\xaf\x04\n" + + "\bsessions\x18\x02 \x03(\v2\x16.daemon.SSHSessionInfoR\bsessions\"\xdb\x06\n" + "\n" + "FullStatus\x12A\n" + "\x0fmanagementState\x18\x01 \x01(\v2\x17.daemon.ManagementStateR\x0fmanagementState\x125\n" + @@ -6394,7 +6760,13 @@ const file_daemon_proto_rawDesc = "" + "\x06events\x18\a \x03(\v2\x13.daemon.SystemEventR\x06events\x124\n" + "\x15lazyConnectionEnabled\x18\t \x01(\bR\x15lazyConnectionEnabled\x12>\n" + "\x0esshServerState\x18\n" + - " \x01(\v2\x16.daemon.SSHServerStateR\x0esshServerState\"\x15\n" + + " \x01(\v2\x16.daemon.SSHServerStateR\x0esshServerState\x124\n" + + "\x16configured_peers_total\x182 \x01(\rR\x14configuredPeersTotal\x12.\n" + + "\x13server_online_peers\x183 \x01(\rR\x11serverOnlinePeers\x12.\n" + + "\x13p2p_connected_peers\x184 \x01(\rR\x11p2pConnectedPeers\x126\n" + + "\x17relayed_connected_peers\x185 
\x01(\rR\x15relayedConnectedPeers\x12*\n" + + "\x11idle_online_peers\x186 \x01(\rR\x0fidleOnlinePeers\x120\n" + + "\x14server_offline_peers\x187 \x01(\rR\x12serverOfflinePeers\"\x15\n" + "\x13ListNetworksRequest\"?\n" + "\x14ListNetworksResponse\x12'\n" + "\x06routes\x18\x01 \x03(\v2\x0f.daemon.NetworkR\x06routes\"a\n" + @@ -6534,7 +6906,7 @@ const file_daemon_proto_rawDesc = "" + "\busername\x18\x02 \x01(\tH\x01R\busername\x88\x01\x01B\x0e\n" + "\f_profileNameB\v\n" + "\t_username\"\x17\n" + - "\x15SwitchProfileResponse\"\xdf\x10\n" + + "\x15SwitchProfileResponse\"\x93\x13\n" + "\x10SetConfigRequest\x12\x1a\n" + "\busername\x18\x01 \x01(\tR\busername\x12 \n" + "\vprofileName\x18\x02 \x01(\tR\vprofileName\x12$\n" + @@ -6573,7 +6945,11 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18\x1f \x01(\bH\x14R\x1cenableSSHLocalPortForwarding\x88\x01\x01\x12I\n" + "\x1denableSSHRemotePortForwarding\x18 \x01(\bH\x15R\x1denableSSHRemotePortForwarding\x88\x01\x01\x12+\n" + "\x0edisableSSHAuth\x18! 
\x01(\bH\x16R\x0edisableSSHAuth\x88\x01\x01\x12+\n" + - "\x0esshJWTCacheTTL\x18\" \x01(\x05H\x17R\x0esshJWTCacheTTL\x88\x01\x01B\x13\n" + + "\x0esshJWTCacheTTL\x18\" \x01(\x05H\x17R\x0esshJWTCacheTTL\x88\x01\x01\x12,\n" + + "\x0fconnection_mode\x18( \x01(\tH\x18R\x0econnectionMode\x88\x01\x01\x123\n" + + "\x13p2p_timeout_seconds\x18) \x01(\rH\x19R\x11p2pTimeoutSeconds\x88\x01\x01\x127\n" + + "\x15relay_timeout_seconds\x18* \x01(\rH\x1aR\x13relayTimeoutSeconds\x88\x01\x01\x126\n" + + "\x15p2p_retry_max_seconds\x18+ \x01(\rH\x1bR\x12p2pRetryMaxSeconds\x88\x01\x01B\x13\n" + "\x11_rosenpassEnabledB\x10\n" + "\x0e_interfaceNameB\x10\n" + "\x0e_wireguardPortB\x17\n" + @@ -6597,7 +6973,11 @@ const file_daemon_proto_rawDesc = "" + "\x1d_enableSSHLocalPortForwardingB \n" + "\x1e_enableSSHRemotePortForwardingB\x11\n" + "\x0f_disableSSHAuthB\x11\n" + - "\x0f_sshJWTCacheTTL\"\x13\n" + + "\x0f_sshJWTCacheTTLB\x12\n" + + "\x10_connection_modeB\x16\n" + + "\x14_p2p_timeout_secondsB\x18\n" + + "\x16_relay_timeout_secondsB\x18\n" + + "\x16_p2p_retry_max_seconds\"\x13\n" + "\x11SetConfigResponse\"Q\n" + "\x11AddProfileRequest\x12\x1a\n" + "\busername\x18\x01 \x01(\tR\busername\x12 \n" + @@ -6896,121 +7276,123 @@ var file_daemon_proto_depIdxs = []int32{ 102, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp 102, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp 101, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration - 23, // 5: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo - 20, // 6: daemon.FullStatus.managementState:type_name -> daemon.ManagementState - 19, // 7: daemon.FullStatus.signalState:type_name -> daemon.SignalState - 18, // 8: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState - 17, // 9: daemon.FullStatus.peers:type_name -> daemon.PeerState - 21, // 10: daemon.FullStatus.relays:type_name -> daemon.RelayState - 22, // 11: 
daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState - 55, // 12: daemon.FullStatus.events:type_name -> daemon.SystemEvent - 24, // 13: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState - 31, // 14: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 98, // 15: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 99, // 16: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range - 32, // 17: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo - 32, // 18: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo - 33, // 19: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule - 0, // 20: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel - 0, // 21: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel - 41, // 22: daemon.ListStatesResponse.states:type_name -> daemon.State - 50, // 23: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags - 52, // 24: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage - 2, // 25: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity - 3, // 26: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 102, // 27: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 100, // 28: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry - 55, // 29: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 101, // 30: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 68, // 31: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile - 1, // 32: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol - 91, // 33: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady - 101, // 34: daemon.StartCaptureRequest.duration:type_name -> google.protobuf.Duration - 101, // 35: daemon.StartBundleCaptureRequest.timeout:type_name -> google.protobuf.Duration - 30, 
// 36: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList - 5, // 37: daemon.DaemonService.Login:input_type -> daemon.LoginRequest - 7, // 38: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest - 9, // 39: daemon.DaemonService.Up:input_type -> daemon.UpRequest - 11, // 40: daemon.DaemonService.Status:input_type -> daemon.StatusRequest - 13, // 41: daemon.DaemonService.Down:input_type -> daemon.DownRequest - 15, // 42: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest - 26, // 43: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest - 28, // 44: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest - 28, // 45: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest - 4, // 46: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest - 35, // 47: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest - 37, // 48: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest - 39, // 49: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest - 42, // 50: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest - 44, // 51: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest - 46, // 52: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest - 48, // 53: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest - 51, // 54: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest - 92, // 55: daemon.DaemonService.StartCapture:input_type -> daemon.StartCaptureRequest - 94, // 56: daemon.DaemonService.StartBundleCapture:input_type -> daemon.StartBundleCaptureRequest - 96, // 57: daemon.DaemonService.StopBundleCapture:input_type -> daemon.StopBundleCaptureRequest - 54, // 58: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest - 56, // 59: 
daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest - 58, // 60: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest - 60, // 61: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest - 62, // 62: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest - 64, // 63: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest - 66, // 64: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest - 69, // 65: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest - 71, // 66: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest - 73, // 67: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest - 75, // 68: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest - 77, // 69: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest - 79, // 70: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest - 81, // 71: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 83, // 72: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest - 85, // 73: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest - 87, // 74: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 89, // 75: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest - 6, // 76: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 8, // 77: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 10, // 78: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 12, // 79: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 14, // 80: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 16, // 81: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 27, // 82: 
daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 29, // 83: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 29, // 84: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 34, // 85: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 36, // 86: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 38, // 87: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 40, // 88: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 43, // 89: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 45, // 90: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 47, // 91: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 49, // 92: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 53, // 93: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 93, // 94: daemon.DaemonService.StartCapture:output_type -> daemon.CapturePacket - 95, // 95: daemon.DaemonService.StartBundleCapture:output_type -> daemon.StartBundleCaptureResponse - 97, // 96: daemon.DaemonService.StopBundleCapture:output_type -> daemon.StopBundleCaptureResponse - 55, // 97: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 57, // 98: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 59, // 99: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 61, // 100: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 63, // 101: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 65, // 102: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 67, // 103: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 70, 
// 104: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 72, // 105: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 74, // 106: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 76, // 107: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse - 78, // 108: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 80, // 109: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 82, // 110: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 84, // 111: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse - 86, // 112: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse - 88, // 113: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 90, // 114: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent - 76, // [76:115] is the sub-list for method output_type - 37, // [37:76] is the sub-list for method input_type - 37, // [37:37] is the sub-list for extension type_name - 37, // [37:37] is the sub-list for extension extendee - 0, // [0:37] is the sub-list for field type_name + 102, // 5: daemon.PeerState.iceBackoffNextRetry:type_name -> google.protobuf.Timestamp + 102, // 6: daemon.PeerState.last_seen_at_server:type_name -> google.protobuf.Timestamp + 23, // 7: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo + 20, // 8: daemon.FullStatus.managementState:type_name -> daemon.ManagementState + 19, // 9: daemon.FullStatus.signalState:type_name -> daemon.SignalState + 18, // 10: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState + 17, // 11: daemon.FullStatus.peers:type_name -> daemon.PeerState + 21, // 12: daemon.FullStatus.relays:type_name -> daemon.RelayState + 22, // 13: daemon.FullStatus.dns_servers:type_name -> 
daemon.NSGroupState + 55, // 14: daemon.FullStatus.events:type_name -> daemon.SystemEvent + 24, // 15: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState + 31, // 16: daemon.ListNetworksResponse.routes:type_name -> daemon.Network + 98, // 17: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 99, // 18: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 32, // 19: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo + 32, // 20: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo + 33, // 21: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule + 0, // 22: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel + 0, // 23: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel + 41, // 24: daemon.ListStatesResponse.states:type_name -> daemon.State + 50, // 25: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags + 52, // 26: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage + 2, // 27: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity + 3, // 28: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category + 102, // 29: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 100, // 30: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 55, // 31: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent + 101, // 32: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 68, // 33: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile + 1, // 34: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol + 91, // 35: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady + 101, // 36: daemon.StartCaptureRequest.duration:type_name -> google.protobuf.Duration + 101, // 37: daemon.StartBundleCaptureRequest.timeout:type_name -> google.protobuf.Duration + 30, // 38: 
daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList + 5, // 39: daemon.DaemonService.Login:input_type -> daemon.LoginRequest + 7, // 40: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest + 9, // 41: daemon.DaemonService.Up:input_type -> daemon.UpRequest + 11, // 42: daemon.DaemonService.Status:input_type -> daemon.StatusRequest + 13, // 43: daemon.DaemonService.Down:input_type -> daemon.DownRequest + 15, // 44: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest + 26, // 45: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest + 28, // 46: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest + 28, // 47: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest + 4, // 48: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest + 35, // 49: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest + 37, // 50: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest + 39, // 51: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest + 42, // 52: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest + 44, // 53: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest + 46, // 54: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest + 48, // 55: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest + 51, // 56: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest + 92, // 57: daemon.DaemonService.StartCapture:input_type -> daemon.StartCaptureRequest + 94, // 58: daemon.DaemonService.StartBundleCapture:input_type -> daemon.StartBundleCaptureRequest + 96, // 59: daemon.DaemonService.StopBundleCapture:input_type -> daemon.StopBundleCaptureRequest + 54, // 60: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest + 56, // 61: 
daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest + 58, // 62: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest + 60, // 63: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest + 62, // 64: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest + 64, // 65: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest + 66, // 66: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest + 69, // 67: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest + 71, // 68: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest + 73, // 69: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest + 75, // 70: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest + 77, // 71: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest + 79, // 72: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest + 81, // 73: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest + 83, // 74: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 85, // 75: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 87, // 76: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 89, // 77: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest + 6, // 78: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 8, // 79: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 10, // 80: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 12, // 81: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 14, // 82: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 16, // 83: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 27, // 84: 
daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 29, // 85: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 29, // 86: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 34, // 87: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 36, // 88: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 38, // 89: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 40, // 90: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 43, // 91: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 45, // 92: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 47, // 93: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 49, // 94: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 53, // 95: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 93, // 96: daemon.DaemonService.StartCapture:output_type -> daemon.CapturePacket + 95, // 97: daemon.DaemonService.StartBundleCapture:output_type -> daemon.StartBundleCaptureResponse + 97, // 98: daemon.DaemonService.StopBundleCapture:output_type -> daemon.StopBundleCaptureResponse + 55, // 99: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 57, // 100: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 59, // 101: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 61, // 102: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 63, // 103: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 65, // 104: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 67, // 105: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 
70, // 106: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 72, // 107: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 74, // 108: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 76, // 109: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse + 78, // 110: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 80, // 111: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 82, // 112: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 84, // 113: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 86, // 114: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 88, // 115: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 90, // 116: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent + 78, // [78:117] is the sub-list for method output_type + 39, // [39:78] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_daemon_proto_init() } diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 3fee9eca82d..168f8f82bba 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -204,6 +204,18 @@ message LoginRequest { optional bool enableSSHRemotePortForwarding = 37; optional bool disableSSHAuth = 38; optional int32 sshJWTCacheTTL = 39; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". 
+ optional string connection_mode = 40; + optional uint32 p2p_timeout_seconds = 41; + optional uint32 relay_timeout_seconds = 42; + + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + optional uint32 p2p_retry_max_seconds = 43; } message LoginResponse { @@ -311,6 +323,28 @@ message GetConfigResponse { bool disableSSHAuth = 25; int32 sshJWTCacheTTL = 26; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is the canonical lower-kebab-case name + // (relay-forced, p2p, p2p-lazy, p2p-dynamic, follow-server) or + // empty string when no local override is set. + string connection_mode = 27; + uint32 p2p_timeout_seconds = 28; + uint32 relay_timeout_seconds = 29; + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = no + // local override (daemon falls back to server-pushed value or built-in + // 15-min default). + uint32 p2p_retry_max_seconds = 30; + + // Phase 3.7h: values most recently pushed by the management server's + // PeerConfig (independent of any local override). The UI surfaces + // these so users can see what "Follow server" would inherit and which + // numeric defaults the empty-entry overrides fall back to. All four + // are 0/empty when the engine has not received PeerConfig yet. + string server_pushed_connection_mode = 31; + uint32 server_pushed_relay_timeout_seconds = 32; + uint32 server_pushed_p2p_timeout_seconds = 33; + uint32 server_pushed_p2p_retry_max_seconds = 34; } // PeerState contains the latest state of a peer @@ -333,6 +367,33 @@ message PeerState { google.protobuf.Duration latency = 17; string relayAddress = 18; bytes sshHostKey = 19; + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + int32 iceBackoffFailures = 20; + google.protobuf.Timestamp iceBackoffNextRetry = 21; + bool iceBackoffSuspended = 22; + // Phase 3.7i (#5989): per-peer enrichment from RemotePeerConfig. 
+ bool server_online = 30; + google.protobuf.Timestamp last_seen_at_server = 31; + repeated string groups = 32; + string effective_connection_mode = 33; + string configured_connection_mode = 34; + uint32 effective_relay_timeout_secs = 35; + uint32 effective_p2p_timeout_secs = 36; + uint32 effective_p2p_retry_max_secs = 37; + uint32 configured_relay_timeout_secs = 38; + uint32 configured_p2p_timeout_secs = 39; + uint32 configured_p2p_retry_max_secs = 40; + // Phase 3.7i (#5989): UI-friendly transient connection-type label. + // Values: + // "" -- not connected (Idle/Connecting) + // "P2P" -- direct ICE path active + // "Relayed" -- relay path active, ICE failed/backoff + // "Relayed (negotiating P2P)" -- relay path active, ICE still trying + // (transient ~1-2s window after wakeup) + // UIs should display this verbatim instead of computing from + // (relayed, ice_*) themselves -- keeps the transition window + // visualization consistent across all clients. + string connection_type_extended = 41; } // LocalPeerState contains the latest state of the local peer @@ -403,6 +464,13 @@ message FullStatus { bool lazyConnectionEnabled = 9; SSHServerState sshServerState = 10; + // Phase 3.7i (#5989): aggregate counters so UIs don't re-derive them. + uint32 configured_peers_total = 50; + uint32 server_online_peers = 51; + uint32 p2p_connected_peers = 52; + uint32 relayed_connected_peers = 53; + uint32 idle_online_peers = 54; + uint32 server_offline_peers = 55; } // Networks @@ -672,6 +740,18 @@ message SetConfigRequest { optional bool enableSSHRemotePortForwarding = 32; optional bool disableSSHAuth = 33; optional int32 sshJWTCacheTTL = 34; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". 
+ optional string connection_mode = 40; + optional uint32 p2p_timeout_seconds = 41; + optional uint32 relay_timeout_seconds = 42; + + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + optional uint32 p2p_retry_max_seconds = 43; } message SetConfigResponse{} diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index 66a8efcc325..d5c16ac56f5 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.6.1 -// - protoc v6.33.1 -// source: daemon.proto package proto @@ -15,50 +11,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - DaemonService_Login_FullMethodName = "/daemon.DaemonService/Login" - DaemonService_WaitSSOLogin_FullMethodName = "/daemon.DaemonService/WaitSSOLogin" - DaemonService_Up_FullMethodName = "/daemon.DaemonService/Up" - DaemonService_Status_FullMethodName = "/daemon.DaemonService/Status" - DaemonService_Down_FullMethodName = "/daemon.DaemonService/Down" - DaemonService_GetConfig_FullMethodName = "/daemon.DaemonService/GetConfig" - DaemonService_ListNetworks_FullMethodName = "/daemon.DaemonService/ListNetworks" - DaemonService_SelectNetworks_FullMethodName = "/daemon.DaemonService/SelectNetworks" - DaemonService_DeselectNetworks_FullMethodName = "/daemon.DaemonService/DeselectNetworks" - DaemonService_ForwardingRules_FullMethodName = "/daemon.DaemonService/ForwardingRules" - DaemonService_DebugBundle_FullMethodName = "/daemon.DaemonService/DebugBundle" - DaemonService_GetLogLevel_FullMethodName = "/daemon.DaemonService/GetLogLevel" - DaemonService_SetLogLevel_FullMethodName = "/daemon.DaemonService/SetLogLevel" - 
DaemonService_ListStates_FullMethodName = "/daemon.DaemonService/ListStates" - DaemonService_CleanState_FullMethodName = "/daemon.DaemonService/CleanState" - DaemonService_DeleteState_FullMethodName = "/daemon.DaemonService/DeleteState" - DaemonService_SetSyncResponsePersistence_FullMethodName = "/daemon.DaemonService/SetSyncResponsePersistence" - DaemonService_TracePacket_FullMethodName = "/daemon.DaemonService/TracePacket" - DaemonService_StartCapture_FullMethodName = "/daemon.DaemonService/StartCapture" - DaemonService_StartBundleCapture_FullMethodName = "/daemon.DaemonService/StartBundleCapture" - DaemonService_StopBundleCapture_FullMethodName = "/daemon.DaemonService/StopBundleCapture" - DaemonService_SubscribeEvents_FullMethodName = "/daemon.DaemonService/SubscribeEvents" - DaemonService_GetEvents_FullMethodName = "/daemon.DaemonService/GetEvents" - DaemonService_SwitchProfile_FullMethodName = "/daemon.DaemonService/SwitchProfile" - DaemonService_SetConfig_FullMethodName = "/daemon.DaemonService/SetConfig" - DaemonService_AddProfile_FullMethodName = "/daemon.DaemonService/AddProfile" - DaemonService_RemoveProfile_FullMethodName = "/daemon.DaemonService/RemoveProfile" - DaemonService_ListProfiles_FullMethodName = "/daemon.DaemonService/ListProfiles" - DaemonService_GetActiveProfile_FullMethodName = "/daemon.DaemonService/GetActiveProfile" - DaemonService_Logout_FullMethodName = "/daemon.DaemonService/Logout" - DaemonService_GetFeatures_FullMethodName = "/daemon.DaemonService/GetFeatures" - DaemonService_TriggerUpdate_FullMethodName = "/daemon.DaemonService/TriggerUpdate" - DaemonService_GetPeerSSHHostKey_FullMethodName = "/daemon.DaemonService/GetPeerSSHHostKey" - DaemonService_RequestJWTAuth_FullMethodName = "/daemon.DaemonService/RequestJWTAuth" - DaemonService_WaitJWTToken_FullMethodName = "/daemon.DaemonService/WaitJWTToken" - DaemonService_StartCPUProfile_FullMethodName = "/daemon.DaemonService/StartCPUProfile" - 
DaemonService_StopCPUProfile_FullMethodName = "/daemon.DaemonService/StopCPUProfile" - DaemonService_GetInstallerResult_FullMethodName = "/daemon.DaemonService/GetInstallerResult" - DaemonService_ExposeService_FullMethodName = "/daemon.DaemonService/ExposeService" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // DaemonServiceClient is the client API for DaemonService service. // @@ -101,13 +55,13 @@ type DaemonServiceClient interface { TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) // StartCapture begins streaming packet capture on the WireGuard interface. // Requires --enable-capture set at service install/reconfigure time. - StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) + StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (DaemonService_StartCaptureClient, error) // StartBundleCapture begins capturing packets to a server-side temp file // for inclusion in the next debug bundle. Auto-stops after the given timeout. StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) // StopBundleCapture stops the running bundle capture. Idempotent. 
StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) - SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) + SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) @@ -133,7 +87,7 @@ type DaemonServiceClient interface { StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) + ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) } type daemonServiceClient struct { @@ -145,9 +99,8 @@ func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { } func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LoginResponse) - err := c.cc.Invoke(ctx, DaemonService_Login_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Login", in, out, opts...) 
if err != nil { return nil, err } @@ -155,9 +108,8 @@ func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts } func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLoginRequest, opts ...grpc.CallOption) (*WaitSSOLoginResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitSSOLoginResponse) - err := c.cc.Invoke(ctx, DaemonService_WaitSSOLogin_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitSSOLogin", in, out, opts...) if err != nil { return nil, err } @@ -165,9 +117,8 @@ func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLogin } func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grpc.CallOption) (*UpResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(UpResponse) - err := c.cc.Invoke(ctx, DaemonService_Up_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Up", in, out, opts...) if err != nil { return nil, err } @@ -175,9 +126,8 @@ func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grp } func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StatusResponse) - err := c.cc.Invoke(ctx, DaemonService_Status_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Status", in, out, opts...) if err != nil { return nil, err } @@ -185,9 +135,8 @@ func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opt } func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (*DownResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DownResponse) - err := c.cc.Invoke(ctx, DaemonService_Down_FullMethodName, in, out, cOpts...) 
+ err := c.cc.Invoke(ctx, "/daemon.DaemonService/Down", in, out, opts...) if err != nil { return nil, err } @@ -195,9 +144,8 @@ func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts .. } func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetConfigResponse) - err := c.cc.Invoke(ctx, DaemonService_GetConfig_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetConfig", in, out, opts...) if err != nil { return nil, err } @@ -205,9 +153,8 @@ func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigReques } func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_ListNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListNetworks", in, out, opts...) if err != nil { return nil, err } @@ -215,9 +162,8 @@ func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworks } func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_SelectNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SelectNetworks", in, out, opts...) 
if err != nil { return nil, err } @@ -225,9 +171,8 @@ func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetw } func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_DeselectNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeselectNetworks", in, out, opts...) if err != nil { return nil, err } @@ -235,9 +180,8 @@ func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNe } func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*ForwardingRulesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ForwardingRulesResponse) - err := c.cc.Invoke(ctx, DaemonService_ForwardingRules_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ForwardingRules", in, out, opts...) if err != nil { return nil, err } @@ -245,9 +189,8 @@ func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequ } func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRequest, opts ...grpc.CallOption) (*DebugBundleResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DebugBundleResponse) - err := c.cc.Invoke(ctx, DaemonService_DebugBundle_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DebugBundle", in, out, opts...) 
if err != nil { return nil, err } @@ -255,9 +198,8 @@ func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRe } func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRequest, opts ...grpc.CallOption) (*GetLogLevelResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetLogLevelResponse) - err := c.cc.Invoke(ctx, DaemonService_GetLogLevel_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetLogLevel", in, out, opts...) if err != nil { return nil, err } @@ -265,9 +207,8 @@ func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRe } func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRequest, opts ...grpc.CallOption) (*SetLogLevelResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetLogLevelResponse) - err := c.cc.Invoke(ctx, DaemonService_SetLogLevel_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetLogLevel", in, out, opts...) if err != nil { return nil, err } @@ -275,9 +216,8 @@ func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRe } func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequest, opts ...grpc.CallOption) (*ListStatesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListStatesResponse) - err := c.cc.Invoke(ctx, DaemonService_ListStates_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListStates", in, out, opts...) if err != nil { return nil, err } @@ -285,9 +225,8 @@ func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequ } func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequest, opts ...grpc.CallOption) (*CleanStateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(CleanStateResponse) - err := c.cc.Invoke(ctx, DaemonService_CleanState_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/CleanState", in, out, opts...) if err != nil { return nil, err } @@ -295,9 +234,8 @@ func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequ } func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*DeleteStateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteStateResponse) - err := c.cc.Invoke(ctx, DaemonService_DeleteState_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeleteState", in, out, opts...) if err != nil { return nil, err } @@ -305,9 +243,8 @@ func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRe } func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in *SetSyncResponsePersistenceRequest, opts ...grpc.CallOption) (*SetSyncResponsePersistenceResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetSyncResponsePersistenceResponse) - err := c.cc.Invoke(ctx, DaemonService_SetSyncResponsePersistence_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetSyncResponsePersistence", in, out, opts...) if err != nil { return nil, err } @@ -315,22 +252,20 @@ func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in } func (c *daemonServiceClient) TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TracePacketResponse) - err := c.cc.Invoke(ctx, DaemonService_TracePacket_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TracePacket", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_StartCapture_FullMethodName, cOpts...) +func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (DaemonService_StartCaptureClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], "/daemon.DaemonService/StartCapture", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[StartCaptureRequest, CapturePacket]{ClientStream: stream} + x := &daemonServiceStartCaptureClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -340,13 +275,26 @@ func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCapture return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type DaemonService_StartCaptureClient = grpc.ServerStreamingClient[CapturePacket] +type DaemonService_StartCaptureClient interface { + Recv() (*CapturePacket, error) + grpc.ClientStream +} + +type daemonServiceStartCaptureClient struct { + grpc.ClientStream +} + +func (x *daemonServiceStartCaptureClient) Recv() (*CapturePacket, error) { + m := new(CapturePacket) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *daemonServiceClient) StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(StartBundleCaptureResponse) - err := c.cc.Invoke(ctx, DaemonService_StartBundleCapture_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartBundleCapture", in, out, opts...) if err != nil { return nil, err } @@ -354,22 +302,20 @@ func (c *daemonServiceClient) StartBundleCapture(ctx context.Context, in *StartB } func (c *daemonServiceClient) StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StopBundleCaptureResponse) - err := c.cc.Invoke(ctx, DaemonService_StopBundleCapture_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopBundleCapture", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], DaemonService_SubscribeEvents_FullMethodName, cOpts...) +func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], "/daemon.DaemonService/SubscribeEvents", opts...) 
if err != nil { return nil, err } - x := &grpc.GenericClientStream[SubscribeRequest, SystemEvent]{ClientStream: stream} + x := &daemonServiceSubscribeEventsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -379,13 +325,26 @@ func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *Subscribe return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type DaemonService_SubscribeEventsClient = grpc.ServerStreamingClient[SystemEvent] +type DaemonService_SubscribeEventsClient interface { + Recv() (*SystemEvent, error) + grpc.ClientStream +} + +type daemonServiceSubscribeEventsClient struct { + grpc.ClientStream +} + +func (x *daemonServiceSubscribeEventsClient) Recv() (*SystemEvent, error) { + m := new(SystemEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetEventsResponse) - err := c.cc.Invoke(ctx, DaemonService_GetEvents_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetEvents", in, out, opts...) if err != nil { return nil, err } @@ -393,9 +352,8 @@ func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsReques } func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SwitchProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_SwitchProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SwitchProfile", in, out, opts...) 
if err != nil { return nil, err } @@ -403,9 +361,8 @@ func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfi } func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetConfigResponse) - err := c.cc.Invoke(ctx, DaemonService_SetConfig_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetConfig", in, out, opts...) if err != nil { return nil, err } @@ -413,9 +370,8 @@ func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigReques } func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequest, opts ...grpc.CallOption) (*AddProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AddProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_AddProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/AddProfile", in, out, opts...) if err != nil { return nil, err } @@ -423,9 +379,8 @@ func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequ } func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfileRequest, opts ...grpc.CallOption) (*RemoveProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RemoveProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_RemoveProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/RemoveProfile", in, out, opts...) if err != nil { return nil, err } @@ -433,9 +388,8 @@ func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfi } func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfilesRequest, opts ...grpc.CallOption) (*ListProfilesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(ListProfilesResponse) - err := c.cc.Invoke(ctx, DaemonService_ListProfiles_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListProfiles", in, out, opts...) if err != nil { return nil, err } @@ -443,9 +397,8 @@ func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfiles } func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiveProfileRequest, opts ...grpc.CallOption) (*GetActiveProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetActiveProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_GetActiveProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetActiveProfile", in, out, opts...) if err != nil { return nil, err } @@ -453,9 +406,8 @@ func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiv } func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LogoutResponse) - err := c.cc.Invoke(ctx, DaemonService_Logout_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Logout", in, out, opts...) if err != nil { return nil, err } @@ -463,9 +415,8 @@ func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opt } func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRequest, opts ...grpc.CallOption) (*GetFeaturesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetFeaturesResponse) - err := c.cc.Invoke(ctx, DaemonService_GetFeatures_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetFeatures", in, out, opts...) 
if err != nil { return nil, err } @@ -473,9 +424,8 @@ func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRe } func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TriggerUpdateResponse) - err := c.cc.Invoke(ctx, DaemonService_TriggerUpdate_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TriggerUpdate", in, out, opts...) if err != nil { return nil, err } @@ -483,9 +433,8 @@ func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpda } func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetPeerSSHHostKeyResponse) - err := c.cc.Invoke(ctx, DaemonService_GetPeerSSHHostKey_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetPeerSSHHostKey", in, out, opts...) if err != nil { return nil, err } @@ -493,9 +442,8 @@ func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeer } func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWTAuthRequest, opts ...grpc.CallOption) (*RequestJWTAuthResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RequestJWTAuthResponse) - err := c.cc.Invoke(ctx, DaemonService_RequestJWTAuth_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/RequestJWTAuth", in, out, opts...) 
if err != nil { return nil, err } @@ -503,9 +451,8 @@ func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWT } func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTTokenRequest, opts ...grpc.CallOption) (*WaitJWTTokenResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitJWTTokenResponse) - err := c.cc.Invoke(ctx, DaemonService_WaitJWTToken_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitJWTToken", in, out, opts...) if err != nil { return nil, err } @@ -513,9 +460,8 @@ func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTToken } func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StartCPUProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_StartCPUProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartCPUProfile", in, out, opts...) if err != nil { return nil, err } @@ -523,9 +469,8 @@ func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUP } func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StopCPUProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_StopCPUProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopCPUProfile", in, out, opts...) 
if err != nil { return nil, err } @@ -533,22 +478,20 @@ func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUPro } func (c *daemonServiceClient) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InstallerResultResponse) - err := c.cc.Invoke(ctx, DaemonService_GetInstallerResult_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetInstallerResult", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[2], DaemonService_ExposeService_FullMethodName, cOpts...) +func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[2], "/daemon.DaemonService/ExposeService", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[ExposeServiceRequest, ExposeServiceEvent]{ClientStream: stream} + x := &daemonServiceExposeServiceClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -558,12 +501,26 @@ func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServi return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type DaemonService_ExposeServiceClient = grpc.ServerStreamingClient[ExposeServiceEvent] +type DaemonService_ExposeServiceClient interface { + Recv() (*ExposeServiceEvent, error) + grpc.ClientStream +} + +type daemonServiceExposeServiceClient struct { + grpc.ClientStream +} + +func (x *daemonServiceExposeServiceClient) Recv() (*ExposeServiceEvent, error) { + m := new(ExposeServiceEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer -// for forward compatibility. +// for forward compatibility type DaemonServiceServer interface { // Login uses setup key to prepare configuration for the daemon. Login(context.Context, *LoginRequest) (*LoginResponse, error) @@ -602,13 +559,13 @@ type DaemonServiceServer interface { TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) // StartCapture begins streaming packet capture on the WireGuard interface. // Requires --enable-capture set at service install/reconfigure time. - StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error + StartCapture(*StartCaptureRequest, DaemonService_StartCaptureServer) error // StartBundleCapture begins capturing packets to a server-side temp file // for inclusion in the next debug bundle. Auto-stops after the given timeout. StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) // StopBundleCapture stops the running bundle capture. Idempotent. 
StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) - SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error + SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) @@ -634,136 +591,132 @@ type DaemonServiceServer interface { StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error + ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error mustEmbedUnimplementedDaemonServiceServer() } -// UnimplementedDaemonServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedDaemonServiceServer struct{} +// UnimplementedDaemonServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedDaemonServiceServer struct { +} func (UnimplementedDaemonServiceServer) Login(context.Context, *LoginRequest) (*LoginResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Login not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Login not implemented") } func (UnimplementedDaemonServiceServer) WaitSSOLogin(context.Context, *WaitSSOLoginRequest) (*WaitSSOLoginResponse, error) { - return nil, status.Error(codes.Unimplemented, "method WaitSSOLogin not implemented") + return nil, status.Errorf(codes.Unimplemented, "method WaitSSOLogin not implemented") } func (UnimplementedDaemonServiceServer) Up(context.Context, *UpRequest) (*UpResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Up not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Up not implemented") } func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Status not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") } func (UnimplementedDaemonServiceServer) Down(context.Context, *DownRequest) (*DownResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Down not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Down not implemented") } func (UnimplementedDaemonServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetConfig not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetConfig not implemented") } func (UnimplementedDaemonServiceServer) ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListNetworks not implemented") } func 
(UnimplementedDaemonServiceServer) SelectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SelectNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SelectNetworks not implemented") } func (UnimplementedDaemonServiceServer) DeselectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeselectNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DeselectNetworks not implemented") } func (UnimplementedDaemonServiceServer) ForwardingRules(context.Context, *EmptyRequest) (*ForwardingRulesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ForwardingRules not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ForwardingRules not implemented") } func (UnimplementedDaemonServiceServer) DebugBundle(context.Context, *DebugBundleRequest) (*DebugBundleResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DebugBundle not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DebugBundle not implemented") } func (UnimplementedDaemonServiceServer) GetLogLevel(context.Context, *GetLogLevelRequest) (*GetLogLevelResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetLogLevel not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) SetLogLevel(context.Context, *SetLogLevelRequest) (*SetLogLevelResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetLogLevel not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) ListStates(context.Context, *ListStatesRequest) (*ListStatesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method 
ListStates not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListStates not implemented") } func (UnimplementedDaemonServiceServer) CleanState(context.Context, *CleanStateRequest) (*CleanStateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method CleanState not implemented") + return nil, status.Errorf(codes.Unimplemented, "method CleanState not implemented") } func (UnimplementedDaemonServiceServer) DeleteState(context.Context, *DeleteStateRequest) (*DeleteStateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeleteState not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DeleteState not implemented") } func (UnimplementedDaemonServiceServer) SetSyncResponsePersistence(context.Context, *SetSyncResponsePersistenceRequest) (*SetSyncResponsePersistenceResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") } func (UnimplementedDaemonServiceServer) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) { - return nil, status.Error(codes.Unimplemented, "method TracePacket not implemented") + return nil, status.Errorf(codes.Unimplemented, "method TracePacket not implemented") } -func (UnimplementedDaemonServiceServer) StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error { - return status.Error(codes.Unimplemented, "method StartCapture not implemented") +func (UnimplementedDaemonServiceServer) StartCapture(*StartCaptureRequest, DaemonService_StartCaptureServer) error { + return status.Errorf(codes.Unimplemented, "method StartCapture not implemented") } func (UnimplementedDaemonServiceServer) StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) { - return nil, status.Error(codes.Unimplemented, "method 
StartBundleCapture not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StartBundleCapture not implemented") } func (UnimplementedDaemonServiceServer) StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StopBundleCapture not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StopBundleCapture not implemented") } -func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error { - return status.Error(codes.Unimplemented, "method SubscribeEvents not implemented") +func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeEvents not implemented") } func (UnimplementedDaemonServiceServer) GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetEvents not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetEvents not implemented") } func (UnimplementedDaemonServiceServer) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SwitchProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SwitchProfile not implemented") } func (UnimplementedDaemonServiceServer) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetConfig not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetConfig not implemented") } func (UnimplementedDaemonServiceServer) AddProfile(context.Context, *AddProfileRequest) (*AddProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method AddProfile not implemented") + return nil, 
status.Errorf(codes.Unimplemented, "method AddProfile not implemented") } func (UnimplementedDaemonServiceServer) RemoveProfile(context.Context, *RemoveProfileRequest) (*RemoveProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method RemoveProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method RemoveProfile not implemented") } func (UnimplementedDaemonServiceServer) ListProfiles(context.Context, *ListProfilesRequest) (*ListProfilesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListProfiles not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListProfiles not implemented") } func (UnimplementedDaemonServiceServer) GetActiveProfile(context.Context, *GetActiveProfileRequest) (*GetActiveProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetActiveProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetActiveProfile not implemented") } func (UnimplementedDaemonServiceServer) Logout(context.Context, *LogoutRequest) (*LogoutResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Logout not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") } func (UnimplementedDaemonServiceServer) GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetFeatures not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetFeatures not implemented") } func (UnimplementedDaemonServiceServer) TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method TriggerUpdate not implemented") + return nil, status.Errorf(codes.Unimplemented, "method TriggerUpdate not implemented") } func (UnimplementedDaemonServiceServer) GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) 
(*GetPeerSSHHostKeyResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") } func (UnimplementedDaemonServiceServer) RequestJWTAuth(context.Context, *RequestJWTAuthRequest) (*RequestJWTAuthResponse, error) { - return nil, status.Error(codes.Unimplemented, "method RequestJWTAuth not implemented") + return nil, status.Errorf(codes.Unimplemented, "method RequestJWTAuth not implemented") } func (UnimplementedDaemonServiceServer) WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) { - return nil, status.Error(codes.Unimplemented, "method WaitJWTToken not implemented") + return nil, status.Errorf(codes.Unimplemented, "method WaitJWTToken not implemented") } func (UnimplementedDaemonServiceServer) StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StartCPUProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StartCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StopCPUProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StopCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetInstallerResult not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetInstallerResult not implemented") } -func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error { - return status.Error(codes.Unimplemented, "method ExposeService not 
implemented") +func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error { + return status.Errorf(codes.Unimplemented, "method ExposeService not implemented") } func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} -func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} // UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to DaemonServiceServer will @@ -773,13 +726,6 @@ type UnsafeDaemonServiceServer interface { } func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { - // If the following call panics, it indicates UnimplementedDaemonServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. 
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&DaemonService_ServiceDesc, srv) } @@ -793,7 +739,7 @@ func _DaemonService_Login_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Login_FullMethodName, + FullMethod: "/daemon.DaemonService/Login", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Login(ctx, req.(*LoginRequest)) @@ -811,7 +757,7 @@ func _DaemonService_WaitSSOLogin_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_WaitSSOLogin_FullMethodName, + FullMethod: "/daemon.DaemonService/WaitSSOLogin", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).WaitSSOLogin(ctx, req.(*WaitSSOLoginRequest)) @@ -829,7 +775,7 @@ func _DaemonService_Up_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Up_FullMethodName, + FullMethod: "/daemon.DaemonService/Up", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Up(ctx, req.(*UpRequest)) @@ -847,7 +793,7 @@ func _DaemonService_Status_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Status_FullMethodName, + FullMethod: "/daemon.DaemonService/Status", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Status(ctx, req.(*StatusRequest)) @@ -865,7 +811,7 @@ func _DaemonService_Down_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Down_FullMethodName, + FullMethod: "/daemon.DaemonService/Down", } handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Down(ctx, req.(*DownRequest)) @@ -883,7 +829,7 @@ func _DaemonService_GetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetConfig_FullMethodName, + FullMethod: "/daemon.DaemonService/GetConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) @@ -901,7 +847,7 @@ func _DaemonService_ListNetworks_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/ListNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListNetworks(ctx, req.(*ListNetworksRequest)) @@ -919,7 +865,7 @@ func _DaemonService_SelectNetworks_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SelectNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/SelectNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SelectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -937,7 +883,7 @@ func _DaemonService_DeselectNetworks_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DeselectNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/DeselectNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeselectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -955,7 +901,7 @@ func _DaemonService_ForwardingRules_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ForwardingRules_FullMethodName, + FullMethod: 
"/daemon.DaemonService/ForwardingRules", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ForwardingRules(ctx, req.(*EmptyRequest)) @@ -973,7 +919,7 @@ func _DaemonService_DebugBundle_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DebugBundle_FullMethodName, + FullMethod: "/daemon.DaemonService/DebugBundle", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DebugBundle(ctx, req.(*DebugBundleRequest)) @@ -991,7 +937,7 @@ func _DaemonService_GetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetLogLevel_FullMethodName, + FullMethod: "/daemon.DaemonService/GetLogLevel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetLogLevel(ctx, req.(*GetLogLevelRequest)) @@ -1009,7 +955,7 @@ func _DaemonService_SetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetLogLevel_FullMethodName, + FullMethod: "/daemon.DaemonService/SetLogLevel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetLogLevel(ctx, req.(*SetLogLevelRequest)) @@ -1027,7 +973,7 @@ func _DaemonService_ListStates_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListStates_FullMethodName, + FullMethod: "/daemon.DaemonService/ListStates", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListStates(ctx, req.(*ListStatesRequest)) @@ -1045,7 +991,7 @@ func _DaemonService_CleanState_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: 
DaemonService_CleanState_FullMethodName, + FullMethod: "/daemon.DaemonService/CleanState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).CleanState(ctx, req.(*CleanStateRequest)) @@ -1063,7 +1009,7 @@ func _DaemonService_DeleteState_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DeleteState_FullMethodName, + FullMethod: "/daemon.DaemonService/DeleteState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeleteState(ctx, req.(*DeleteStateRequest)) @@ -1081,7 +1027,7 @@ func _DaemonService_SetSyncResponsePersistence_Handler(srv interface{}, ctx cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetSyncResponsePersistence_FullMethodName, + FullMethod: "/daemon.DaemonService/SetSyncResponsePersistence", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetSyncResponsePersistence(ctx, req.(*SetSyncResponsePersistenceRequest)) @@ -1099,7 +1045,7 @@ func _DaemonService_TracePacket_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_TracePacket_FullMethodName, + FullMethod: "/daemon.DaemonService/TracePacket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TracePacket(ctx, req.(*TracePacketRequest)) @@ -1112,11 +1058,21 @@ func _DaemonService_StartCapture_Handler(srv interface{}, stream grpc.ServerStre if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).StartCapture(m, &grpc.GenericServerStream[StartCaptureRequest, CapturePacket]{ServerStream: stream}) + return srv.(DaemonServiceServer).StartCapture(m, &daemonServiceStartCaptureServer{stream}) } -// This type alias is provided for backwards compatibility with 
existing code that references the prior non-generic stream type by name. -type DaemonService_StartCaptureServer = grpc.ServerStreamingServer[CapturePacket] +type DaemonService_StartCaptureServer interface { + Send(*CapturePacket) error + grpc.ServerStream +} + +type daemonServiceStartCaptureServer struct { + grpc.ServerStream +} + +func (x *daemonServiceStartCaptureServer) Send(m *CapturePacket) error { + return x.ServerStream.SendMsg(m) +} func _DaemonService_StartBundleCapture_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartBundleCaptureRequest) @@ -1128,7 +1084,7 @@ func _DaemonService_StartBundleCapture_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StartBundleCapture_FullMethodName, + FullMethod: "/daemon.DaemonService/StartBundleCapture", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StartBundleCapture(ctx, req.(*StartBundleCaptureRequest)) @@ -1146,7 +1102,7 @@ func _DaemonService_StopBundleCapture_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StopBundleCapture_FullMethodName, + FullMethod: "/daemon.DaemonService/StopBundleCapture", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StopBundleCapture(ctx, req.(*StopBundleCaptureRequest)) @@ -1159,11 +1115,21 @@ func _DaemonService_SubscribeEvents_Handler(srv interface{}, stream grpc.ServerS if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).SubscribeEvents(m, &grpc.GenericServerStream[SubscribeRequest, SystemEvent]{ServerStream: stream}) + return srv.(DaemonServiceServer).SubscribeEvents(m, &daemonServiceSubscribeEventsServer{stream}) } -// This type alias is provided for backwards compatibility with 
existing code that references the prior non-generic stream type by name. -type DaemonService_SubscribeEventsServer = grpc.ServerStreamingServer[SystemEvent] +type DaemonService_SubscribeEventsServer interface { + Send(*SystemEvent) error + grpc.ServerStream +} + +type daemonServiceSubscribeEventsServer struct { + grpc.ServerStream +} + +func (x *daemonServiceSubscribeEventsServer) Send(m *SystemEvent) error { + return x.ServerStream.SendMsg(m) +} func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetEventsRequest) @@ -1175,7 +1141,7 @@ func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetEvents_FullMethodName, + FullMethod: "/daemon.DaemonService/GetEvents", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetEvents(ctx, req.(*GetEventsRequest)) @@ -1193,7 +1159,7 @@ func _DaemonService_SwitchProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SwitchProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/SwitchProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SwitchProfile(ctx, req.(*SwitchProfileRequest)) @@ -1211,7 +1177,7 @@ func _DaemonService_SetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetConfig_FullMethodName, + FullMethod: "/daemon.DaemonService/SetConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetConfig(ctx, req.(*SetConfigRequest)) @@ -1229,7 +1195,7 @@ func _DaemonService_AddProfile_Handler(srv interface{}, ctx context.Context, dec } info := 
&grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_AddProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/AddProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).AddProfile(ctx, req.(*AddProfileRequest)) @@ -1247,7 +1213,7 @@ func _DaemonService_RemoveProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_RemoveProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/RemoveProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RemoveProfile(ctx, req.(*RemoveProfileRequest)) @@ -1265,7 +1231,7 @@ func _DaemonService_ListProfiles_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListProfiles_FullMethodName, + FullMethod: "/daemon.DaemonService/ListProfiles", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListProfiles(ctx, req.(*ListProfilesRequest)) @@ -1283,7 +1249,7 @@ func _DaemonService_GetActiveProfile_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetActiveProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/GetActiveProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetActiveProfile(ctx, req.(*GetActiveProfileRequest)) @@ -1301,7 +1267,7 @@ func _DaemonService_Logout_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Logout_FullMethodName, + FullMethod: "/daemon.DaemonService/Logout", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Logout(ctx, req.(*LogoutRequest)) @@ -1319,7 +1285,7 @@ func 
_DaemonService_GetFeatures_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetFeatures_FullMethodName, + FullMethod: "/daemon.DaemonService/GetFeatures", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetFeatures(ctx, req.(*GetFeaturesRequest)) @@ -1337,7 +1303,7 @@ func _DaemonService_TriggerUpdate_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_TriggerUpdate_FullMethodName, + FullMethod: "/daemon.DaemonService/TriggerUpdate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TriggerUpdate(ctx, req.(*TriggerUpdateRequest)) @@ -1355,7 +1321,7 @@ func _DaemonService_GetPeerSSHHostKey_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetPeerSSHHostKey_FullMethodName, + FullMethod: "/daemon.DaemonService/GetPeerSSHHostKey", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetPeerSSHHostKey(ctx, req.(*GetPeerSSHHostKeyRequest)) @@ -1373,7 +1339,7 @@ func _DaemonService_RequestJWTAuth_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_RequestJWTAuth_FullMethodName, + FullMethod: "/daemon.DaemonService/RequestJWTAuth", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RequestJWTAuth(ctx, req.(*RequestJWTAuthRequest)) @@ -1391,7 +1357,7 @@ func _DaemonService_WaitJWTToken_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_WaitJWTToken_FullMethodName, + FullMethod: "/daemon.DaemonService/WaitJWTToken", } handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ return srv.(DaemonServiceServer).WaitJWTToken(ctx, req.(*WaitJWTTokenRequest)) @@ -1409,7 +1375,7 @@ func _DaemonService_StartCPUProfile_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StartCPUProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/StartCPUProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StartCPUProfile(ctx, req.(*StartCPUProfileRequest)) @@ -1427,7 +1393,7 @@ func _DaemonService_StopCPUProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StopCPUProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/StopCPUProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StopCPUProfile(ctx, req.(*StopCPUProfileRequest)) @@ -1445,7 +1411,7 @@ func _DaemonService_GetInstallerResult_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetInstallerResult_FullMethodName, + FullMethod: "/daemon.DaemonService/GetInstallerResult", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetInstallerResult(ctx, req.(*InstallerResultRequest)) @@ -1458,11 +1424,21 @@ func _DaemonService_ExposeService_Handler(srv interface{}, stream grpc.ServerStr if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).ExposeService(m, &grpc.GenericServerStream[ExposeServiceRequest, ExposeServiceEvent]{ServerStream: stream}) + return srv.(DaemonServiceServer).ExposeService(m, &daemonServiceExposeServiceServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type DaemonService_ExposeServiceServer = grpc.ServerStreamingServer[ExposeServiceEvent] +type DaemonService_ExposeServiceServer interface { + Send(*ExposeServiceEvent) error + grpc.ServerStream +} + +type daemonServiceExposeServiceServer struct { + grpc.ServerStream +} + +func (x *daemonServiceExposeServiceServer) Send(m *ExposeServiceEvent) error { + return x.ServerStream.SendMsg(m) +} // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. // It's only intended for direct use with grpc.RegisterService, diff --git a/client/server/debug.go b/client/server/debug.go index 33247db5f12..3a2dd275ae0 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -57,16 +57,40 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( } } + // Phase 3.7h (#5989): capture server-pushed mode + timers from ConnMgr so + // the debug bundle's config.txt records both effective and configured + // values. Empty/zero if engine or ConnMgr aren't available yet. 
+ var ( + spMode string + spRelayTOSec uint32 + spP2pTOSec uint32 + spP2pRetMax uint32 + ) + if s.connectClient != nil { + if eng := s.connectClient.Engine(); eng != nil { + if cm := eng.ConnMgr(); cm != nil { + spMode = cm.ServerPushedMode().String() + spRelayTOSec = cm.ServerPushedRelayTimeoutSecs() + spP2pTOSec = cm.ServerPushedP2pTimeoutSecs() + spP2pRetMax = cm.ServerPushedP2pRetryMaxSecs() + } + } + } + bundleGenerator := debug.NewBundleGenerator( debug.GeneratorDependencies{ - InternalConfig: s.config, - StatusRecorder: s.statusRecorder, - SyncResponse: syncResponse, - LogPath: s.logFile, - CPUProfile: cpuProfileData, - CapturePath: capturePath, - RefreshStatus: refreshStatus, - ClientMetrics: clientMetrics, + InternalConfig: s.config, + StatusRecorder: s.statusRecorder, + SyncResponse: syncResponse, + LogPath: s.logFile, + CPUProfile: cpuProfileData, + CapturePath: capturePath, + RefreshStatus: refreshStatus, + ClientMetrics: clientMetrics, + ServerPushedConnectionMode: spMode, + ServerPushedRelayTimeoutSec: spRelayTOSec, + ServerPushedP2pTimeoutSec: spP2pTOSec, + ServerPushedP2pRetryMaxSec: spP2pRetMax, }, debug.BundleConfig{ Anonymize: req.GetAnonymize(), diff --git a/client/server/server.go b/client/server/server.go index 648ffa8ce6a..2e985a7a66c 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1515,31 +1515,61 @@ func (s *Server) GetConfig(ctx context.Context, req *proto.GetConfigRequest) (*p sshJWTCacheTTL = int32(*cfg.SSHJWTCacheTTL) } + // Surface what the management server most recently pushed via + // PeerConfig so the UI can show "Follow server (currently: )" + // and use the numeric defaults as placeholders in the override + // fields. All zero/empty when the engine has not received PeerConfig + // yet -- the UI handles that gracefully. 
+ var ( + spMode string + spRelayTOSecs uint32 + spP2pTOSecs uint32 + spP2pRetMax uint32 + ) + if s.connectClient != nil { + if eng := s.connectClient.Engine(); eng != nil { + if cm := eng.ConnMgr(); cm != nil { + spMode = cm.ServerPushedMode().String() + spRelayTOSecs = cm.ServerPushedRelayTimeoutSecs() + spP2pTOSecs = cm.ServerPushedP2pTimeoutSecs() + spP2pRetMax = cm.ServerPushedP2pRetryMaxSecs() + } + } + } + return &proto.GetConfigResponse{ - ManagementUrl: managementURL.String(), - PreSharedKey: preSharedKey, - AdminURL: adminURL.String(), - InterfaceName: cfg.WgIface, - WireguardPort: int64(cfg.WgPort), - Mtu: int64(cfg.MTU), - DisableAutoConnect: cfg.DisableAutoConnect, - ServerSSHAllowed: *cfg.ServerSSHAllowed, - RosenpassEnabled: cfg.RosenpassEnabled, - RosenpassPermissive: cfg.RosenpassPermissive, - LazyConnectionEnabled: cfg.LazyConnectionEnabled, - BlockInbound: cfg.BlockInbound, - DisableNotifications: disableNotifications, - NetworkMonitor: networkMonitor, - DisableDns: disableDNS, - DisableClientRoutes: disableClientRoutes, - DisableServerRoutes: disableServerRoutes, - BlockLanAccess: blockLANAccess, - EnableSSHRoot: enableSSHRoot, - EnableSSHSFTP: enableSSHSFTP, - EnableSSHLocalPortForwarding: enableSSHLocalPortForwarding, - EnableSSHRemotePortForwarding: enableSSHRemotePortForwarding, - DisableSSHAuth: disableSSHAuth, - SshJWTCacheTTL: sshJWTCacheTTL, + ManagementUrl: managementURL.String(), + PreSharedKey: preSharedKey, + AdminURL: adminURL.String(), + InterfaceName: cfg.WgIface, + WireguardPort: int64(cfg.WgPort), + Mtu: int64(cfg.MTU), + DisableAutoConnect: cfg.DisableAutoConnect, + ServerSSHAllowed: *cfg.ServerSSHAllowed, + RosenpassEnabled: cfg.RosenpassEnabled, + RosenpassPermissive: cfg.RosenpassPermissive, + LazyConnectionEnabled: cfg.LazyConnectionEnabled, + BlockInbound: cfg.BlockInbound, + DisableNotifications: disableNotifications, + NetworkMonitor: networkMonitor, + DisableDns: disableDNS, + DisableClientRoutes: disableClientRoutes, + 
DisableServerRoutes: disableServerRoutes, + BlockLanAccess: blockLANAccess, + EnableSSHRoot: enableSSHRoot, + EnableSSHSFTP: enableSSHSFTP, + EnableSSHLocalPortForwarding: enableSSHLocalPortForwarding, + EnableSSHRemotePortForwarding: enableSSHRemotePortForwarding, + DisableSSHAuth: disableSSHAuth, + SshJWTCacheTTL: sshJWTCacheTTL, + ConnectionMode: cfg.ConnectionMode, + P2PTimeoutSeconds: cfg.P2pTimeoutSeconds, + RelayTimeoutSeconds: cfg.RelayTimeoutSeconds, + P2PRetryMaxSeconds: cfg.P2pRetryMaxSeconds, + ServerPushedConnectionMode: spMode, + ServerPushedRelayTimeoutSeconds: spRelayTOSecs, + ServerPushedP2PTimeoutSeconds: spP2pTOSecs, + ServerPushedP2PRetryMaxSeconds: spP2pRetMax, }, nil } diff --git a/client/server/server_test.go b/client/server/server_test.go index 641cd85fefe..a91e1dd6014 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/job" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/groups" @@ -335,7 +336,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil, peer_connections.NewMemoryStore(5*time.Minute), peer_connections.NewSnapshotRouter()) if err != nil { return nil, "", err } diff --git a/client/server/setconfig_test.go 
b/client/server/setconfig_test.go index b90b5653dc4..9d8ce003e5b 100644 --- a/client/server/setconfig_test.go +++ b/client/server/setconfig_test.go @@ -201,6 +201,17 @@ func verifyAllFieldsCovered(t *testing.T, req *proto.SetConfigRequest) { "EnableSSHRemotePortForwarding": true, "DisableSSHAuth": true, "SshJWTCacheTTL": true, + // Phase 3.7i Connection-Mode fields. Currently in the proto so + // daemons can advertise them via GetConfig, but SetConfig does + // NOT apply them at runtime — they're only persisted via + // `netbird service install/reconfigure --connection-mode/...` + // (writes the active profile file directly; daemon picks up on + // next start). Wiring them through SetConfig is a follow-up + // task. Listed here so the structural test passes. + "ConnectionMode": true, + "P2PTimeoutSeconds": true, + "RelayTimeoutSeconds": true, + "P2PRetryMaxSeconds": true, } val := reflect.ValueOf(req).Elem() @@ -265,6 +276,17 @@ func TestCLIFlags_MappedToSetConfig(t *testing.T) { // SetConfigRequest fields that don't have CLI flags (settable only via UI or other means). fieldsWithoutCLIFlags := map[string]bool{ "DisableNotifications": true, // Only settable via UI + // Phase 3.7i Connection-Mode fields: have CLI flags + // (--connection-mode, --relay-timeout, --p2p-timeout, + // --p2p-retry-max) but those flags belong to the + // `netbird service install/reconfigure` command, not `up`, + // and they bypass the SetConfig RPC entirely (write directly + // to the active profile file). So from this test's + // perspective they have no SetConfig-mapped CLI flag. + "ConnectionMode": true, + "P2PTimeoutSeconds": true, + "RelayTimeoutSeconds": true, + "P2PRetryMaxSeconds": true, } // Get all SetConfigRequest fields to verify our map is complete. 
diff --git a/client/status/status.go b/client/status/status.go index 8c932bbab29..9cb93b882d4 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -55,6 +55,20 @@ type ConvertOptions struct { IPsFilter map[string]struct{} ConnectionTypeFilter string ProfileName string + + // Phase 1+2+3 (#5989) connection-mode + lifecycle timers — effective + // resolved values from GetConfig RPC. Empty/zero ok if RPC unavailable. + ConnectionMode string + RelayTimeoutSeconds uint32 + P2pTimeoutSeconds uint32 + P2pRetryMaxSeconds uint32 + + // Phase 3.7h (#5989) values most recently pushed by the management + // server via PeerConfig, independent of any local override. + ServerPushedConnectionMode string + ServerPushedRelayTimeoutSeconds uint32 + ServerPushedP2pTimeoutSeconds uint32 + ServerPushedP2pRetryMaxSeconds uint32 } type PeerStateDetailOutput struct { @@ -73,6 +87,10 @@ type PeerStateDetailOutput struct { Latency time.Duration `json:"latency" yaml:"latency"` RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"` Networks []string `json:"networks" yaml:"networks"` + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int `json:"iceBackoffFailures" yaml:"iceBackoffFailures"` + IceBackoffNextRetry time.Time `json:"iceBackoffNextRetry" yaml:"iceBackoffNextRetry"` + IceBackoffSuspended bool `json:"iceBackoffSuspended" yaml:"iceBackoffSuspended"` } type PeersStateOutput struct { @@ -151,6 +169,21 @@ type OutputOverview struct { LazyConnectionEnabled bool `json:"lazyConnectionEnabled" yaml:"lazyConnectionEnabled"` ProfileName string `json:"profileName" yaml:"profileName"` SSHServerState SSHServerStateOutput `json:"sshServer" yaml:"sshServer"` + + // Phase 1+2+3 (#5989) connection-mode + lifecycle timers. Effective values + // (after env > local-config > server-push resolution). 
+ ConnectionMode string `json:"connectionMode" yaml:"connectionMode"` + RelayTimeoutSeconds uint32 `json:"relayTimeoutSeconds" yaml:"relayTimeoutSeconds"` + P2pTimeoutSeconds uint32 `json:"p2pTimeoutSeconds" yaml:"p2pTimeoutSeconds"` + P2pRetryMaxSeconds uint32 `json:"p2pRetryMaxSeconds" yaml:"p2pRetryMaxSeconds"` + + // Phase 3.7h (#5989) values most recently pushed by the management server + // via PeerConfig, independent of any local override. Empty/zero when no + // PeerConfig has been received yet. + ServerPushedConnectionMode string `json:"serverPushedConnectionMode" yaml:"serverPushedConnectionMode"` + ServerPushedRelayTimeoutSeconds uint32 `json:"serverPushedRelayTimeoutSeconds" yaml:"serverPushedRelayTimeoutSeconds"` + ServerPushedP2pTimeoutSeconds uint32 `json:"serverPushedP2pTimeoutSeconds" yaml:"serverPushedP2pTimeoutSeconds"` + ServerPushedP2pRetryMaxSeconds uint32 `json:"serverPushedP2pRetryMaxSeconds" yaml:"serverPushedP2pRetryMaxSeconds"` } // ConvertToStatusOutputOverview converts protobuf status to the output overview. 
@@ -194,6 +227,15 @@ func ConvertToStatusOutputOverview(pbFullStatus *proto.FullStatus, opts ConvertO LazyConnectionEnabled: pbFullStatus.GetLazyConnectionEnabled(), ProfileName: opts.ProfileName, SSHServerState: sshServerOverview, + + ConnectionMode: opts.ConnectionMode, + RelayTimeoutSeconds: opts.RelayTimeoutSeconds, + P2pTimeoutSeconds: opts.P2pTimeoutSeconds, + P2pRetryMaxSeconds: opts.P2pRetryMaxSeconds, + ServerPushedConnectionMode: opts.ServerPushedConnectionMode, + ServerPushedRelayTimeoutSeconds: opts.ServerPushedRelayTimeoutSeconds, + ServerPushedP2pTimeoutSeconds: opts.ServerPushedP2pTimeoutSeconds, + ServerPushedP2pRetryMaxSeconds: opts.ServerPushedP2pRetryMaxSeconds, } if opts.Anonymize { @@ -337,6 +379,9 @@ func mapPeers( Latency: pbPeerState.GetLatency().AsDuration(), RosenpassEnabled: pbPeerState.GetRosenpassEnabled(), Networks: pbPeerState.GetNetworks(), + IceBackoffFailures: int(pbPeerState.GetIceBackoffFailures()), + IceBackoffNextRetry: iceBackoffNextRetry(pbPeerState), + IceBackoffSuspended: pbPeerState.GetIceBackoffSuspended(), } peersStateDetail = append(peersStateDetail, peerState) @@ -538,6 +583,8 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS goarm = fmt.Sprintf(" (ARMv%s)", os.Getenv("GOARM")) } + connectionModeBlock := formatConnectionModeBlock(o) + summary := fmt.Sprintf( "OS: %s\n"+ "Daemon version: %s\n"+ @@ -552,6 +599,7 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS "Interface type: %s\n"+ "Quantum resistance: %s\n"+ "Lazy connection: %s\n"+ + "%s"+ "SSH Server: %s\n"+ "Networks: %s\n"+ "%s"+ @@ -569,6 +617,7 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS interfaceTypeString, rosenpassEnabledStatus, lazyConnectionEnabledStatus, + connectionModeBlock, sshServerStatus, networks, forwardingRulesString, @@ -577,6 +626,38 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS return summary } +// 
formatConnectionModeBlock renders the Phase-3.7h connection-mode + timer +// values, showing both the effective and the server-pushed values so operators +// can tell at a glance which side of the resolution applies to a given peer. +// Returns an empty string if no values are present (e.g., older daemon that +// doesn't populate ConvertOptions). Otherwise returns a multi-line block ending +// in '\n' for direct insertion into the summary template. +func formatConnectionModeBlock(o *OutputOverview) string { + allEmpty := o.ConnectionMode == "" && + o.RelayTimeoutSeconds == 0 && o.P2pTimeoutSeconds == 0 && o.P2pRetryMaxSeconds == 0 && + o.ServerPushedConnectionMode == "" && + o.ServerPushedRelayTimeoutSeconds == 0 && o.ServerPushedP2pTimeoutSeconds == 0 && o.ServerPushedP2pRetryMaxSeconds == 0 + if allEmpty { + return "" + } + effective := o.ConnectionMode + if effective == "" { + effective = "(default)" + } + pushed := o.ServerPushedConnectionMode + if pushed == "" { + pushed = "(none received)" + } + return fmt.Sprintf( + "Connection mode (effective): %s\n"+ + " Relay timeout: %ds, P2P timeout: %ds, P2P retry max: %ds\n"+ + "Connection mode (server-pushed): %s\n"+ + " Relay timeout: %ds, P2P timeout: %ds, P2P retry max: %ds\n", + effective, o.RelayTimeoutSeconds, o.P2pTimeoutSeconds, o.P2pRetryMaxSeconds, + pushed, o.ServerPushedRelayTimeoutSeconds, o.ServerPushedP2pTimeoutSeconds, o.ServerPushedP2pRetryMaxSeconds, + ) +} + // FullDetailSummary returns a full detailed summary with peer details and events. 
func (o *OutputOverview) FullDetailSummary() string { parsedPeersString := parsePeers(o.Peers, o.RosenpassEnabled, o.RosenpassPermissive) @@ -645,6 +726,9 @@ func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { Networks: maps.Keys(peerState.GetRoutes()), Latency: durationpb.New(peerState.Latency), SshHostKey: peerState.SSHHostKey, + IceBackoffFailures: int32(peerState.IceBackoffFailures), + IceBackoffNextRetry: timestamppb.New(peerState.IceBackoffNextRetry), + IceBackoffSuspended: peerState.IceBackoffSuspended, } pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) } @@ -683,6 +767,17 @@ func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { return &pbFullStatus } +// iceBackoffNextRetry returns the ICE backoff next-retry time from a proto +// PeerState. If the timestamp field is unset (nil), it returns Go's zero +// time to match the daemon's zero-valued State.IceBackoffNextRetry. +func iceBackoffNextRetry(pbPeerState *proto.PeerState) time.Time { + ts := pbPeerState.GetIceBackoffNextRetry() + if ts == nil { + return time.Time{} + } + return ts.AsTime().Local() +} + func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bool) string { var ( peersString = "" @@ -768,6 +863,21 @@ func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bo peerState.Latency.String(), ) + // Phase 3 (#5989): append ICE-backoff line only when suspended AND + // the suspension has not yet expired by wall-clock. The PeerState + // snapshot is only refreshed on ICE state-change events, so the + // suspended-flag stays true even after nextRetry has passed; the + // time-check here suppresses the noise for already-expired windows. 
+ if peerState.IceBackoffSuspended && time.Now().Before(peerState.IceBackoffNextRetry) { + remaining := time.Until(peerState.IceBackoffNextRetry).Round(time.Second) + peerString += fmt.Sprintf( + " ICE backoff: suspended for %s (failure #%d, retry at %s)\n", + remaining, + peerState.IceBackoffFailures, + peerState.IceBackoffNextRetry.Format("15:04:05"), + ) + } + peersString += peerString } return peersString diff --git a/client/status/status_test.go b/client/status/status_test.go index 7754eebae97..932ad14cdb1 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -304,7 +304,10 @@ func TestParsingToJSON(t *testing.T) { "quantumResistance": false, "networks": [ "10.1.0.0/24" - ] + ], + "iceBackoffFailures": 0, + "iceBackoffNextRetry": "0001-01-01T00:00:00Z", + "iceBackoffSuspended": false }, { "fqdn": "peer-2.awesome-domain.com", @@ -327,7 +330,10 @@ func TestParsingToJSON(t *testing.T) { "transferSent": 1000, "latency": 10000000, "quantumResistance": false, - "networks": null + "networks": null, + "iceBackoffFailures": 0, + "iceBackoffNextRetry": "0001-01-01T00:00:00Z", + "iceBackoffSuspended": false } ] }, @@ -398,7 +404,15 @@ func TestParsingToJSON(t *testing.T) { "sshServer":{ "enabled":false, "sessions":[] - } + }, + "connectionMode":"", + "relayTimeoutSeconds":0, + "p2pTimeoutSeconds":0, + "p2pRetryMaxSeconds":0, + "serverPushedConnectionMode":"", + "serverPushedRelayTimeoutSeconds":0, + "serverPushedP2pTimeoutSeconds":0, + "serverPushedP2pRetryMaxSeconds":0 }` // @formatter:on @@ -436,6 +450,9 @@ func TestParsingToYAML(t *testing.T) { quantumResistance: false networks: - 10.1.0.0/24 + iceBackoffFailures: 0 + iceBackoffNextRetry: 0001-01-01T00:00:00Z + iceBackoffSuspended: false - fqdn: peer-2.awesome-domain.com netbirdIp: 192.168.178.102 publicKey: Pubkey2 @@ -455,6 +472,9 @@ func TestParsingToYAML(t *testing.T) { latency: 10ms quantumResistance: false networks: [] + iceBackoffFailures: 0 + iceBackoffNextRetry: 0001-01-01T00:00:00Z 
+ iceBackoffSuspended: false cliVersion: development daemonVersion: 0.14.1 daemonStatus: Connected @@ -505,6 +525,14 @@ profileName: "" sshServer: enabled: false sessions: [] +connectionMode: "" +relayTimeoutSeconds: 0 +p2pTimeoutSeconds: 0 +p2pRetryMaxSeconds: 0 +serverPushedConnectionMode: "" +serverPushedRelayTimeoutSeconds: 0 +serverPushedP2pTimeoutSeconds: 0 +serverPushedP2pRetryMaxSeconds: 0 ` assert.Equal(t, expectedYAML, yaml) diff --git a/client/system/features.go b/client/system/features.go new file mode 100644 index 00000000000..a910820aa6e --- /dev/null +++ b/client/system/features.go @@ -0,0 +1,27 @@ +package system + +// Phase 3.7i (#5989): keywords this client build implements that the +// management server may want to know about. Sent in +// PeerSystemMeta.SupportedFeatures on every Login/Sync. +// +// Adding a new keyword: +// 1. Append it here (new entries at the end so test diffs stay small). +// 2. Update the test in features_test.go. +// 3. Add the server-side branch that consumes it (typically in +// management/internals/shared/grpc/conversion.go) or document +// explicitly that none is needed. +// +// Removing a keyword: only safe if no live management server still +// branches on it. Coordinate with the mgmt-server release. +var supportedFeatures = []string{ + "p2p_dynamic", +} + +// SupportedFeatures returns the list of capability keywords this build +// advertises. Returns a fresh slice so callers cannot mutate the +// underlying global list. +func SupportedFeatures() []string { + out := make([]string, len(supportedFeatures)) + copy(out, supportedFeatures) + return out +} diff --git a/client/system/features_test.go b/client/system/features_test.go new file mode 100644 index 00000000000..31ac15168c6 --- /dev/null +++ b/client/system/features_test.go @@ -0,0 +1,40 @@ +package system + +import ( + "slices" + "testing" +) + +// Phase 3.7i: this test pins down the exact list of capability keywords +// this NetBird build advertises. 
The list ships out to the management +// server in PeerSystemMeta.SupportedFeatures and the server uses it to +// decide whether to send legacy-compat fallback settings (e.g. downgrade +// to p2p-lazy when the client lacks "p2p_dynamic"). +// +// Reviewers: when adding a new capability, also add a corresponding +// server-side branch (or document explicitly that none is needed). +func TestSupportedFeatures_PinsCurrentList(t *testing.T) { + got := SupportedFeatures() + want := []string{ + "p2p_dynamic", + } + if !slices.Equal(got, want) { + t.Errorf("supported features changed:\n got: %v\n want: %v", got, want) + } +} + +// SupportedFeatures must return a defensive copy so callers cannot +// mutate the global list. +func TestSupportedFeatures_ReturnsCopy(t *testing.T) { + a := SupportedFeatures() + b := SupportedFeatures() + if len(a) > 0 && &a[0] == &b[0] { + t.Fatal("SupportedFeatures must return a fresh slice each call") + } + if len(a) > 0 { + a[0] = "mutated" + if SupportedFeatures()[0] == "mutated" { + t.Fatal("global list was mutated through caller's slice") + } + } +} diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 28f98ae59ae..53b524162ec 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -251,7 +251,6 @@ type serviceClient struct { mAllowSSH *systray.MenuItem mAutoConnect *systray.MenuItem mEnableRosenpass *systray.MenuItem - mLazyConnEnabled *systray.MenuItem mBlockInbound *systray.MenuItem mNotifications *systray.MenuItem mAdvancedSettings *systray.MenuItem @@ -287,6 +286,27 @@ type serviceClient struct { sDisableSSHAuth *widget.Check iSSHJWTCacheTTL *widget.Entry + // Phase 1+ ConnectionMode selector + per-mode timeout overrides. + // Defaulting to "Follow server" leaves the local override empty so + // the daemon uses whatever the management server pushes. 
+ sConnectionMode *widget.Select + iRelayTimeout *widget.Entry + iP2pTimeout *widget.Entry + iP2pRetryMax *widget.Entry + connectionMode string + relayTimeoutSecs uint32 + p2pTimeoutSecs uint32 + p2pRetryMaxSecs uint32 + + // Phase 3.7h: latest values pushed by the management server, captured + // from GetConfigResponse.ServerPushed*. Used to render the + // "Follow server (currently: )" entry in the dropdown and the + // "use server default (Ns)" hints in the timeout entries. + serverPushedMode string + serverPushedRelayTimeoutSecs uint32 + serverPushedP2pTimeoutSecs uint32 + serverPushedP2pRetryMaxSecs uint32 + // observable settings over corresponding iMngURL and iPreSharedKey values. managementURL string preSharedKey string @@ -476,6 +496,19 @@ func (s *serviceClient) showSettingsUI() { s.sDisableSSHAuth = widget.NewCheck("Disable SSH Authentication", nil) s.iSSHJWTCacheTTL = widget.NewEntry() + // Connection-mode override + per-mode timeout fields. + // Order matches the Android spinner so behaviour is consistent. 
+ s.sConnectionMode = widget.NewSelect( + []string{"Follow server", "relay-forced", "p2p", "p2p-lazy", "p2p-dynamic"}, + func(string) { s.updateTimeoutEntriesEnabled() }, + ) + s.iRelayTimeout = widget.NewEntry() + s.iRelayTimeout.SetPlaceHolder("seconds (empty = use server default)") + s.iP2pTimeout = widget.NewEntry() + s.iP2pTimeout.SetPlaceHolder("seconds (empty = use server default)") + s.iP2pRetryMax = widget.NewEntry() + s.iP2pRetryMax.SetPlaceHolder("seconds (empty = use server default)") + s.wSettings.SetContent(s.getSettingsForm()) s.wSettings.Resize(fyne.NewSize(600, 400)) s.wSettings.SetFixedSize(true) @@ -586,9 +619,52 @@ func (s *serviceClient) hasSettingsChanged(iMngURL string, port, mtu int64) bool s.disableClientRoutes != s.sDisableClientRoutes.Checked || s.disableServerRoutes != s.sDisableServerRoutes.Checked || s.blockLANAccess != s.sBlockLANAccess.Checked || + s.hasConnectionModeChanges() || s.hasSSHChanges() } +// hasConnectionModeChanges reports whether the user touched the +// Connection Mode dropdown or any of the timeout entries on the +// Network tab. Empty / non-numeric timeout entries map to 0 +// (= no override). +func (s *serviceClient) hasConnectionModeChanges() bool { + if s.sConnectionMode == nil { + return false + } + desired := s.selectedConnectionMode() + if s.connectionMode != desired { + return true + } + return s.relayTimeoutSecs != parseUint32Field(s.iRelayTimeout.Text) || + s.p2pTimeoutSecs != parseUint32Field(s.iP2pTimeout.Text) || + s.p2pRetryMaxSecs != parseUint32Field(s.iP2pRetryMax.Text) +} + +// selectedConnectionMode returns the canonical mode string for the +// current dropdown selection. The "Follow server" entry maps to empty +// (clears any local override). It may carry a "(currently: )" +// suffix when the engine has received a PeerConfig, so we match by +// prefix. 
+func (s *serviceClient) selectedConnectionMode() string { + v := s.sConnectionMode.Selected + if v == "" || strings.HasPrefix(v, "Follow server") { + return "" + } + return v +} + +func parseUint32Field(text string) uint32 { + t := strings.TrimSpace(text) + if t == "" { + return 0 + } + v, err := strconv.ParseUint(t, 10, 32) + if err != nil { + return 0 + } + return uint32(v) +} + func (s *serviceClient) applySettingsChanges(iMngURL string, port, mtu int64) error { s.managementURL = iMngURL s.preSharedKey = s.iPreSharedKey.Text @@ -662,6 +738,17 @@ func (s *serviceClient) buildSetConfigRequest(iMngURL string, port, mtu int64) ( req.OptionalPreSharedKey = &s.iPreSharedKey.Text } + // Connection-mode override + per-mode timeouts. Empty connection_mode + // clears any local override (= "Follow server"). + connMode := s.selectedConnectionMode() + req.ConnectionMode = &connMode + relaySecs := parseUint32Field(s.iRelayTimeout.Text) + p2pSecs := parseUint32Field(s.iP2pTimeout.Text) + retrySecs := parseUint32Field(s.iP2pRetryMax.Text) + req.RelayTimeoutSeconds = &relaySecs + req.P2PTimeoutSeconds = &p2pSecs + req.P2PRetryMaxSeconds = &retrySecs + return req, nil } @@ -731,10 +818,94 @@ func (s *serviceClient) getNetworkForm() *widget.Form { {Text: "Disable Client Routes", Widget: s.sDisableClientRoutes}, {Text: "Disable Server Routes", Widget: s.sDisableServerRoutes}, {Text: "Disable LAN Access", Widget: s.sBlockLANAccess}, + {Text: "Connection Mode", Widget: s.sConnectionMode}, + {Text: "Relay Timeout (s)", Widget: s.iRelayTimeout}, + {Text: "P2P Timeout (s)", Widget: s.iP2pTimeout}, + {Text: "P2P Retry-Max (s)", Widget: s.iP2pRetryMax}, }, } } +// followServerLabel returns the dropdown text for the "Follow server" +// option. When the engine has received a PeerConfig and the server has +// pushed a mode, we suffix it with "(currently: )" so users see +// what they would inherit by leaving the override on Follow server. 
+func (s *serviceClient) followServerLabel() string { + if s.serverPushedMode == "" { + return "Follow server" + } + return "Follow server (currently: " + s.serverPushedMode + ")" +} + +// formatTimeoutHint renders the placeholder text for an empty override +// entry, including the actual server-pushed default in seconds when +// available. +func formatTimeoutHint(secs uint32) string { + if secs == 0 { + return "seconds (empty = use server default)" + } + return "seconds (empty = use server default, " + strconv.FormatUint(uint64(secs), 10) + "s)" +} + +// refreshConnectionModeWidgets re-renders the Connection Mode dropdown +// and the timeout entries' placeholder text based on the latest +// server-pushed values. Safe to call multiple times. Preserves the +// current selection by canonical-mode string (so "(currently: ...)" +// suffix changes do not lose the user's choice). +func (s *serviceClient) refreshConnectionModeWidgets() { + if s.sConnectionMode == nil { + return + } + prev := s.selectedConnectionMode() + s.sConnectionMode.Options = []string{ + s.followServerLabel(), + "relay-forced", + "p2p", + "p2p-lazy", + "p2p-dynamic", + } + if prev == "" { + s.sConnectionMode.SetSelected(s.followServerLabel()) + } else { + s.sConnectionMode.SetSelected(prev) + } + s.sConnectionMode.Refresh() + + if s.iRelayTimeout != nil { + s.iRelayTimeout.SetPlaceHolder(formatTimeoutHint(s.serverPushedRelayTimeoutSecs)) + } + if s.iP2pTimeout != nil { + s.iP2pTimeout.SetPlaceHolder(formatTimeoutHint(s.serverPushedP2pTimeoutSecs)) + } + if s.iP2pRetryMax != nil { + s.iP2pRetryMax.SetPlaceHolder(formatTimeoutHint(s.serverPushedP2pRetryMaxSecs)) + } +} + +// updateTimeoutEntriesEnabled enables only the timeout fields that are +// meaningful for the currently-selected connection mode. The lazy +// connection manager (and therefore inactivity teardown) only runs in +// p2p-lazy + p2p-dynamic, so other modes get all three fields disabled. 
+func (s *serviceClient) updateTimeoutEntriesEnabled() { + if s.iRelayTimeout == nil { + return + } + switch s.sConnectionMode.Selected { + case "p2p-lazy": + s.iRelayTimeout.Enable() + s.iP2pTimeout.Disable() + s.iP2pRetryMax.Disable() + case "p2p-dynamic": + s.iRelayTimeout.Enable() + s.iP2pTimeout.Enable() + s.iP2pRetryMax.Enable() + default: + s.iRelayTimeout.Disable() + s.iP2pTimeout.Disable() + s.iP2pRetryMax.Disable() + } +} + func (s *serviceClient) getSSHForm() *widget.Form { return &widget.Form{ Items: []*widget.FormItem{ @@ -1042,7 +1213,6 @@ func (s *serviceClient) onTrayReady() { s.mAllowSSH = s.mSettings.AddSubMenuItemCheckbox("Allow SSH", allowSSHMenuDescr, false) s.mAutoConnect = s.mSettings.AddSubMenuItemCheckbox("Connect on Startup", autoConnectMenuDescr, false) s.mEnableRosenpass = s.mSettings.AddSubMenuItemCheckbox("Enable Quantum-Resistance", quantumResistanceMenuDescr, false) - s.mLazyConnEnabled = s.mSettings.AddSubMenuItemCheckbox("Enable Lazy Connections", lazyConnMenuDescr, false) s.mBlockInbound = s.mSettings.AddSubMenuItemCheckbox("Block Inbound Connections", blockInboundMenuDescr, false) s.mNotifications = s.mSettings.AddSubMenuItemCheckbox("Notifications", notificationsMenuDescr, false) s.mSettings.AddSeparator() @@ -1069,7 +1239,7 @@ func (s *serviceClient) onTrayReady() { s.mExitNode.Disable() s.exitNodeMu.Unlock() - s.mNetworks = systray.AddMenuItem("Networks", networksMenuDescr) + s.mNetworks = systray.AddMenuItem("Peers and Networks", networksMenuDescr) s.mNetworks.Disable() systray.AddSeparator() @@ -1314,6 +1484,14 @@ func (s *serviceClient) getSrvConfig() { cfg = protoConfigToConfig(srvCfg) + // Capture the raw server-pushed values so the UI can show + // "Follow server (currently: )" and the numeric default-hints + // in the override entries. 
+ s.serverPushedMode = srvCfg.GetServerPushedConnectionMode() + s.serverPushedRelayTimeoutSecs = srvCfg.GetServerPushedRelayTimeoutSeconds() + s.serverPushedP2pTimeoutSecs = srvCfg.GetServerPushedP2PTimeoutSeconds() + s.serverPushedP2pRetryMaxSecs = srvCfg.GetServerPushedP2PRetryMaxSeconds() + if cfg.ManagementURL.String() != "" { s.managementURL = cfg.ManagementURL.String() } @@ -1348,6 +1526,11 @@ func (s *serviceClient) getSrvConfig() { s.sshJWTCacheTTL = *cfg.SSHJWTCacheTTL } + s.connectionMode = cfg.ConnectionMode + s.relayTimeoutSecs = cfg.RelayTimeoutSeconds + s.p2pTimeoutSecs = cfg.P2pTimeoutSeconds + s.p2pRetryMaxSecs = cfg.P2pRetryMaxSeconds + if s.showAdvancedSettings { s.iMngURL.SetText(s.managementURL) s.iPreSharedKey.SetText(cfg.PreSharedKey) @@ -1386,6 +1569,33 @@ func (s *serviceClient) getSrvConfig() { if cfg.SSHJWTCacheTTL != nil { s.iSSHJWTCacheTTL.SetText(strconv.Itoa(*cfg.SSHJWTCacheTTL)) } + + // Connection-mode dropdown + timeout entries. Refresh first so + // the "Follow server (currently: ...)" suffix and the numeric + // default-hints reflect what GetConfigResponse just delivered. 
+ s.refreshConnectionModeWidgets() + switch cfg.ConnectionMode { + case "relay-forced", "p2p", "p2p-lazy", "p2p-dynamic": + s.sConnectionMode.SetSelected(cfg.ConnectionMode) + default: + s.sConnectionMode.SetSelected(s.followServerLabel()) + } + if cfg.RelayTimeoutSeconds == 0 { + s.iRelayTimeout.SetText("") + } else { + s.iRelayTimeout.SetText(strconv.FormatUint(uint64(cfg.RelayTimeoutSeconds), 10)) + } + if cfg.P2pTimeoutSeconds == 0 { + s.iP2pTimeout.SetText("") + } else { + s.iP2pTimeout.SetText(strconv.FormatUint(uint64(cfg.P2pTimeoutSeconds), 10)) + } + if cfg.P2pRetryMaxSeconds == 0 { + s.iP2pRetryMax.SetText("") + } else { + s.iP2pRetryMax.SetText(strconv.FormatUint(uint64(cfg.P2pRetryMaxSeconds), 10)) + } + s.updateTimeoutEntriesEnabled() } if s.mNotifications == nil { @@ -1465,6 +1675,12 @@ func protoConfigToConfig(cfg *proto.GetConfigResponse) *profilemanager.Config { ttl := int(cfg.SshJWTCacheTTL) config.SSHJWTCacheTTL = &ttl + // Phase 1+ ConnectionMode override + per-mode timeouts. 
+ config.ConnectionMode = cfg.ConnectionMode + config.RelayTimeoutSeconds = cfg.RelayTimeoutSeconds + config.P2pTimeoutSeconds = cfg.P2PTimeoutSeconds + config.P2pRetryMaxSeconds = cfg.P2PRetryMaxSeconds + return &config } @@ -1551,12 +1767,6 @@ func (s *serviceClient) loadSettings() { s.mEnableRosenpass.Uncheck() } - if cfg.LazyConnectionEnabled { - s.mLazyConnEnabled.Check() - } else { - s.mLazyConnEnabled.Uncheck() - } - if cfg.BlockInbound { s.mBlockInbound.Check() } else { @@ -1579,7 +1789,6 @@ func (s *serviceClient) updateConfig() error { disableAutoStart := !s.mAutoConnect.Checked() sshAllowed := s.mAllowSSH.Checked() rosenpassEnabled := s.mEnableRosenpass.Checked() - lazyConnectionEnabled := s.mLazyConnEnabled.Checked() blockInbound := s.mBlockInbound.Checked() notificationsDisabled := !s.mNotifications.Checked() @@ -1602,14 +1811,13 @@ func (s *serviceClient) updateConfig() error { } req := proto.SetConfigRequest{ - ProfileName: activeProf.Name, - Username: currUser.Username, - DisableAutoConnect: &disableAutoStart, - ServerSSHAllowed: &sshAllowed, - RosenpassEnabled: &rosenpassEnabled, - LazyConnectionEnabled: &lazyConnectionEnabled, - BlockInbound: &blockInbound, - DisableNotifications: ¬ificationsDisabled, + ProfileName: activeProf.Name, + Username: currUser.Username, + DisableAutoConnect: &disableAutoStart, + ServerSSHAllowed: &sshAllowed, + RosenpassEnabled: &rosenpassEnabled, + BlockInbound: &blockInbound, + DisableNotifications: ¬ificationsDisabled, } if _, err := conn.SetConfig(s.ctx, &req); err != nil { diff --git a/client/ui/const.go b/client/ui/const.go index 48619be752c..ce7a9a29421 100644 --- a/client/ui/const.go +++ b/client/ui/const.go @@ -4,7 +4,6 @@ const ( allowSSHMenuDescr = "Allow SSH connections" autoConnectMenuDescr = "Connect automatically when the service starts" quantumResistanceMenuDescr = "Enable post-quantum security via Rosenpass" - lazyConnMenuDescr = "[Experimental] Enable lazy connections" blockInboundMenuDescr = "Block 
inbound connections to the local machine and routed networks" notificationsMenuDescr = "Enable notifications" advancedSettingsMenuDescr = "Advanced settings of the application" diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 876fcef5fd8..90208230867 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -43,8 +43,6 @@ func (h *eventHandler) listen(ctx context.Context) { h.handleAutoConnectClick() case <-h.client.mEnableRosenpass.ClickedCh: h.handleRosenpassClick() - case <-h.client.mLazyConnEnabled.ClickedCh: - h.handleLazyConnectionClick() case <-h.client.mBlockInbound.ClickedCh: h.handleBlockInboundClick() case <-h.client.mAdvancedSettings.ClickedCh: @@ -152,15 +150,6 @@ func (h *eventHandler) handleRosenpassClick() { } } -func (h *eventHandler) handleLazyConnectionClick() { - h.toggleCheckbox(h.client.mLazyConnEnabled) - if err := h.updateConfigWithErr(); err != nil { - h.toggleCheckbox(h.client.mLazyConnEnabled) // revert checkbox state on error - log.Errorf("failed to update config: %v", err) - h.client.notifier.Send("Error", "Failed to update lazy connection settings") - } -} - func (h *eventHandler) handleBlockInboundClick() { h.toggleCheckbox(h.client.mBlockInbound) if err := h.updateConfigWithErr(); err != nil { diff --git a/client/ui/network.go b/client/ui/network.go index 571e871bbf2..20a7c5948c6 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -25,6 +25,7 @@ const ( allNetworksText = "All networks" overlappingNetworksText = "Overlapping networks" exitNodeNetworksText = "Exit-node networks" + peersText = "Peers" allNetworks filter = "all" overlappingNetworks filter = "overlapping" exitNodeNetworks filter = "exit-node" @@ -34,7 +35,7 @@ const ( type filter string func (s *serviceClient) showNetworksUI() { - s.wNetworks = s.app.NewWindow("Networks") + s.wNetworks = s.app.NewWindow("Peers and Networks") s.wNetworks.SetOnClosed(s.cancel) allGrid := container.New(layout.NewGridLayout(3)) @@ -42,17 
+43,64 @@ func (s *serviceClient) showNetworksUI() { overlappingGrid := container.New(layout.NewGridLayout(3)) exitNodeGrid := container.New(layout.NewGridLayout(3)) routeCheckContainer := container.NewVBox() + peersBundle := s.buildPeersTabContent(s.ctx) + // Wrap the Peers tab content in a Stack so it fills the full tab + // area (NewBorder alone collapses when child MinSizes are small). tabs := container.NewAppTabs( + container.NewTabItem(peersText, container.NewStack(peersBundle.Content)), container.NewTabItem(allNetworksText, allGrid), container.NewTabItem(overlappingNetworksText, overlappingGrid), container.NewTabItem(exitNodeNetworksText, exitNodeGrid), ) - tabs.OnSelected = func(item *container.TabItem) { + + // Phase 3.7i (#5989): the outer footer adapts to the active tab so + // the user has a single place for actions. On the Peers tab we show + // only Show-Full + Refresh; on a Networks tab we show the legacy + // Refresh + Select-all + Deselect-All. + selectAllBtn := widget.NewButton("Select all", func() { + _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + s.selectAllFilteredNetworks(f) + s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + }) + deselectAllBtn := widget.NewButton("Deselect All", func() { + _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + s.deselectAllFilteredNetworks(f) s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + }) + refreshBtn := widget.NewButton("Refresh", func() { + if tabs.Selected() != nil && tabs.Selected().Text == peersText { + peersBundle.Refresh() + } else { + s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + } + }) + + updateFooter := func() { + onPeers := tabs.Selected() != nil && tabs.Selected().Text == peersText + if onPeers { + peersBundle.ShowFull.Show() + selectAllBtn.Hide() + deselectAllBtn.Hide() + } else { + peersBundle.ShowFull.Hide() + selectAllBtn.Show() + 
deselectAllBtn.Show() + } + } + + tabs.OnSelected = func(item *container.TabItem) { + updateFooter() + if item != nil && item.Text != peersText { + s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + } } tabs.OnUnselected = func(item *container.TabItem) { - grid, _ := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - grid.Objects = nil + // Only reset network grids when leaving a network tab; the + // peers VBox manages its own state. + if item != nil && item.Text != peersText { + grid, _ := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + grid.Objects = nil + } } routeCheckContainer.Add(tabs) @@ -61,21 +109,13 @@ func (s *serviceClient) showNetworksUI() { buttonBox := container.NewHBox( layout.NewSpacer(), - widget.NewButton("Refresh", func() { - s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - }), - widget.NewButton("Select all", func() { - _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - s.selectAllFilteredNetworks(f) - s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - }), - widget.NewButton("Deselect All", func() { - _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - s.deselectAllFilteredNetworks(f) - s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - }), + peersBundle.ShowFull, + refreshBtn, + selectAllBtn, + deselectAllBtn, layout.NewSpacer(), ) + updateFooter() // initial state matches the first tab (Peers) content := container.NewBorder(nil, buttonBox, nil, nil, scrollContainer) @@ -86,6 +126,17 @@ func (s *serviceClient) showNetworksUI() { } func (s *serviceClient) updateNetworks(grid *fyne.Container, f filter) { + s.updateNetworksWithMode(grid, f, false) +} + +// updateNetworksSilent is the auto-refresh entry point: it never pops up an +// error dialog, only logs. 
The user still gets the popup if they hit the +// Refresh button manually (which calls updateNetworks). +func (s *serviceClient) updateNetworksSilent(grid *fyne.Container, f filter) { + s.updateNetworksWithMode(grid, f, true) +} + +func (s *serviceClient) updateNetworksWithMode(grid *fyne.Container, f filter, silent bool) { grid.Objects = nil grid.Refresh() idHeader := widget.NewLabelWithStyle(" ID", fyne.TextAlignLeading, fyne.TextStyle{Bold: true}) @@ -96,7 +147,7 @@ func (s *serviceClient) updateNetworks(grid *fyne.Container, f filter) { grid.Add(networkHeader) grid.Add(resolvedIPsHeader) - filteredRoutes, err := s.getFilteredNetworks(f) + filteredRoutes, err := s.getFilteredNetworksWithMode(f, silent) if err != nil { return } @@ -118,7 +169,9 @@ func (s *serviceClient) updateNetworks(grid *fyne.Container, f filter) { domains := r.GetDomains() if len(domains) == 0 { - grid.Add(widget.NewLabel(network)) + rangeLabel := widget.NewLabel(network) + rangeLabel.Selectable = true + grid.Add(rangeLabel) grid.Add(widget.NewLabel("")) continue } @@ -154,10 +207,20 @@ func (s *serviceClient) updateNetworks(grid *fyne.Container, f filter) { } func (s *serviceClient) getFilteredNetworks(f filter) ([]*proto.Network, error) { + return s.getFilteredNetworksWithMode(f, false) +} + +func (s *serviceClient) getFilteredNetworksWithMode(f filter, silent bool) ([]*proto.Network, error) { routes, err := s.fetchNetworks() if err != nil { log.Errorf(getClientFMT, err) - s.showError(fmt.Errorf(getClientFMT, err)) + // Auto-refresh ticker fires every 10s; if the daemon IPC is down + // (e.g. user toggled VPN off, daemon restart, network drop) we + // must NOT spam a modal dialog every tick. Manual Refresh still + // shows the popup because the user expects feedback. 
+ if !silent { + s.showError(fmt.Errorf(getClientFMT, err)) + } return nil, err } switch f { @@ -313,7 +376,12 @@ func (s *serviceClient) startAutoRefresh(interval time.Duration, tabs *container ticker := time.NewTicker(interval) go func() { for range ticker.C { - s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodesGrid) + // Silent mode: auto-refresh never pops up modal "not + // connected" dialogs. The Refresh button still does, since + // the user expects feedback when they trigger it. + grid, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodesGrid) + s.wNetworks.Content().Refresh() + s.updateNetworksSilent(grid, f) } }() diff --git a/client/ui/peers_tab.go b/client/ui/peers_tab.go new file mode 100644 index 00000000000..209ddda7296 --- /dev/null +++ b/client/ui/peers_tab.go @@ -0,0 +1,388 @@ +//go:build !(linux && 386) + +package main + +import ( + "context" + "fmt" + "image/color" + "sort" + "strings" + "sync" + "time" + + "fyne.io/fyne/v2" + "fyne.io/fyne/v2/canvas" + "fyne.io/fyne/v2/container" + "fyne.io/fyne/v2/widget" + + "github.com/netbirdio/netbird/client/proto" +) + +// Phase 3.7i color palette for peer-row status swatch. 
+// +// P2P -> bright green (best path active) +// Relayed -> dark green (working but suboptimal) +// Idle -> grey (no active connection, waiting for traffic) +// Offline -> red (peer can't be reached at all on the server) +var ( + colorPeerP2P = color.NRGBA{R: 0x2e, G: 0xc8, B: 0x6b, A: 0xff} // #2EC86B + colorPeerRelayed = color.NRGBA{R: 0x1e, G: 0x6b, B: 0x3a, A: 0xff} // #1E6B3A + colorPeerIdle = color.NRGBA{R: 0x88, G: 0x88, B: 0x88, A: 0xff} // #888888 + colorPeerOffline = color.NRGBA{R: 0xd2, G: 0x3b, B: 0x3b, A: 0xff} // #D23B3B +) + +// peersTabBundle is what buildPeersTabContent returns: the tab content +// that lives inside AppTabs PLUS the Show-Full checkbox + the refresh +// callback that the OUTER window footer needs (so the user has a single +// footer for both showFull-toggle and Refresh-trigger). Phase 3.7i. +type peersTabBundle struct { + Content fyne.CanvasObject + ShowFull *widget.Check + Refresh func() +} + +// buildPeersTabContent constructs the "Peers" tab content (counter + +// list of expandable peer rows). Show-Full + Refresh live in the outer +// window footer (returned via peersTabBundle so network.go can place +// them). Phase 3.7i of #5989. +func (s *serviceClient) buildPeersTabContent(ctx context.Context) peersTabBundle { + summary := widget.NewLabel("") + breakdown := widget.NewLabel("") + listVBox := container.NewVBox() + showFull := widget.NewCheck("Show full peer details", nil) + + // Per-peer expand state survives Refresh (otherwise every render + // would collapse all rows the user just opened). Keyed by pubkey. 
+ expandedMu := sync.Mutex{} + expanded := make(map[string]bool) + + render := func() { + conn, err := s.getSrvClient(failFastTimeout) + if err != nil { + fyne.Do(func() { summary.SetText("Error: " + err.Error()) }) + return + } + callCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + st, err := conn.Status(callCtx, &proto.StatusRequest{GetFullPeerStatus: true}) + if err != nil { + fyne.Do(func() { summary.SetText("Error: " + err.Error()) }) + return + } + fs := st.GetFullStatus() + + fyne.Do(func() { + summary.SetText(fmt.Sprintf("%d of %d peers online (server)", + fs.GetServerOnlinePeers(), fs.GetConfiguredPeersTotal())) + breakdown.SetText(fmt.Sprintf("%d P2P | %d relayed | %d idle | %d offline", + fs.GetP2PConnectedPeers(), fs.GetRelayedConnectedPeers(), + fs.GetIdleOnlinePeers(), fs.GetServerOfflinePeers())) + + listVBox.Objects = nil + peers := fs.GetPeers() + sort.SliceStable(peers, func(i, j int) bool { + gi, gj := peerGroup(peers[i]), peerGroup(peers[j]) + if gi != gj { + return gi < gj + } + return strings.ToLower(peers[i].GetFqdn()) < strings.ToLower(peers[j].GetFqdn()) + }) + for _, p := range peers { + listVBox.Add(newPeerRow(p, showFull.Checked, &expandedMu, expanded)) + } + listVBox.Refresh() + }) + } + + showFull.OnChanged = func(_ bool) { render() } + + // Lifecycle-safe periodic refresh: ctx-respecting, exits when the + // serviceClient context is cancelled (i.e. the UI process shuts down). + // 30 s polling -- daemon-RPC is local so cost is small. + go func() { + render() + t := time.NewTicker(30 * time.Second) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + render() + } + } + }() + + // Place listVBox directly in Border center (no inner VScroll). The + // outer Networks-window already wraps everything in a VScroll, so + // nesting another would create double-scroll UX. Border center + // auto-grows to fit listVBox content; outer scroll handles overflow. 
+ content := container.NewBorder( + container.NewVBox(summary, breakdown), + nil, nil, nil, + listVBox, + ) + return peersTabBundle{Content: content, ShowFull: showFull, Refresh: render} +} + +// newPeerRow returns a single expandable row: a clickable header that +// dynamically adds/removes a detail label below it on tap. Expansion +// state is persisted in `expanded` (keyed by pubkey) so Refresh doesn't +// collapse rows the user just opened. Multiple rows can be expanded +// simultaneously (each row owns its own state). Phase 3.7i of #5989. +func newPeerRow(p *proto.PeerState, showFull bool, mu *sync.Mutex, expanded map[string]bool) *fyne.Container { + pubkey := p.GetPubKey() + titleCollapsed := fmt.Sprintf("▶ %s %s %s", peerGlyph(p), peerHostnameShort(p), peerModeTag(p)) + titleExpanded := fmt.Sprintf("▼ %s %s %s", peerGlyph(p), peerHostnameShort(p), peerModeTag(p)) + + mu.Lock() + startExpanded := expanded[pubkey] + mu.Unlock() + + header := widget.NewButton(titleCollapsed, nil) + header.Alignment = widget.ButtonAlignLeading + header.Importance = widget.LowImportance + + // Phase 3.7i colored status swatch on the left of every row. + swatch := canvas.NewRectangle(peerSwatchColor(p)) + swatch.SetMinSize(fyne.NewSize(6, 1)) // 6px-wide vertical bar + + row := container.NewBorder(nil, nil, swatch, nil, header) + box := container.NewVBox(row) + var detail *widget.Label + + addDetail := func() { + detail = widget.NewLabel(buildPeerDetailText(p, showFull)) + detail.Wrapping = fyne.TextWrapWord + detail.TextStyle = fyne.TextStyle{Monospace: true} + // Phase 3.7i: let the user mark + copy peer-detail text with the + // mouse (e.g. to paste an FQDN or IP into another tool). Fyne + // 2.6+ Label supports the Selectable flag for this. 
+ detail.Selectable = true + box.Add(detail) + header.SetText(titleExpanded) + } + removeDetail := func() { + if detail != nil { + box.Remove(detail) + detail = nil + } + header.SetText(titleCollapsed) + } + + if startExpanded { + addDetail() + } + + header.OnTapped = func() { + mu.Lock() + nowExpanded := !expanded[pubkey] + expanded[pubkey] = nowExpanded + mu.Unlock() + if nowExpanded { + addDetail() + } else { + removeDetail() + } + box.Refresh() + } + return box +} + +func peerGroup(p *proto.PeerState) int { + if !p.GetServerOnline() { + return 3 + } + cs := strings.ToLower(p.GetConnStatus()) + if cs == "connected" && !p.GetRelayed() { + return 0 + } + if cs == "connected" && p.GetRelayed() { + return 1 + } + return 2 +} + +// peerSwatchColor returns the color of the leading status swatch on a +// peer row. Mirrors the peerGroup buckets but uses the extended hybrid +// label when present so the negotiating window also gets a sensible +// color (dark green like Relayed). +func peerSwatchColor(p *proto.PeerState) color.Color { + if !p.GetServerOnline() { + return colorPeerOffline + } + switch p.GetConnectionTypeExtended() { + case "P2P": + return colorPeerP2P + case "Relayed", "Relayed (negotiating P2P)": + return colorPeerRelayed + } + switch peerGroup(p) { + case 0: + return colorPeerP2P + case 1: + return colorPeerRelayed + case 2: + return colorPeerIdle + default: + return colorPeerOffline + } +} + +func peerGlyph(p *proto.PeerState) string { + // Phase 3.7i hybrid display: prefer the daemon-derived + // ConnectionTypeExtended when set so all UIs render the brief + // "Relayed (negotiating P2P)" wakeup window consistently. 
+ switch p.GetConnectionTypeExtended() { + case "P2P": + return "[P2P]" + case "Relayed": + return "[Relay]" + case "Relayed (negotiating P2P)": + return "[Relay→P2P]" + } + switch peerGroup(p) { + case 0: + return "[P2P]" + case 1: + return "[Relay]" + case 2: + return "[Idle]" + default: + return "[Offline]" + } +} + +func peerHostnameShort(p *proto.PeerState) string { + fqdn := p.GetFqdn() + if i := strings.Index(fqdn, "."); i > 0 { + return fqdn[:i] + } + return fqdn +} + +func peerModeTag(p *proto.PeerState) string { + eff, cfg := p.GetEffectiveConnectionMode(), p.GetConfiguredConnectionMode() + if eff == "" { + return "" + } + if cfg != "" && cfg != eff { + return "! " + eff + " (cfg: " + cfg + ")" + } + return eff +} + +// buildPeerDetailText builds the per-peer detail text. Standard fields +// always shown. When `full` is true an additional section with the +// extra technical fields (transfer counters, configured timeouts, etc.) +// is appended. +func buildPeerDetailText(p *proto.PeerState, full bool) string { + var sb strings.Builder + fmt.Fprintf(&sb, "IP: %s\n", p.GetIP()) + fmt.Fprintf(&sb, "FQDN: %s\n", p.GetFqdn()) + connType := p.GetConnStatus() + // Phase 3.7i: prefer the daemon-derived hybrid label so the + // transient "Relayed (negotiating P2P)" wakeup window is visible. 
+ if ext := p.GetConnectionTypeExtended(); ext != "" { + connType = ext + } else if p.GetRelayed() { + connType += " (relayed)" + } + fmt.Fprintf(&sb, "Connection type: %s\n", connType) + fmt.Fprintf(&sb, "Effective mode: %s\n", orDashStr(p.GetEffectiveConnectionMode())) + if p.GetEffectiveConnectionMode() != p.GetConfiguredConnectionMode() && p.GetConfiguredConnectionMode() != "" { + fmt.Fprintf(&sb, "Configured mode: %s\n", orDashStr(p.GetConfiguredConnectionMode())) + } + if hs := p.GetLastWireguardHandshake(); hs != nil && hs.IsValid() { + fmt.Fprintf(&sb, "Last handshake: %s\n", hs.AsTime().Format(time.RFC3339)) + } + fmt.Fprintf(&sb, "Latency: %s\n", peerLatencyStr(p)) + if strings.EqualFold(p.GetConnStatus(), "connected") { + if p.GetRelayed() { + fmt.Fprintf(&sb, "Relay server: %s\n", orDashStr(p.GetRelayAddress())) + } else { + fmt.Fprintf(&sb, "Local endpoint: %s\n", orDashStr(p.GetLocalIceCandidateEndpoint())) + fmt.Fprintf(&sb, "Remote endpoint: %s\n", orDashStr(p.GetRemoteIceCandidateEndpoint())) + } + } + if ls := p.GetLastSeenAtServer(); ls != nil && ls.IsValid() { + fmt.Fprintf(&sb, "Last seen at srv: %s\n", ls.AsTime().Format(time.RFC3339)) + } + if g := p.GetGroups(); len(g) > 0 { + fmt.Fprintf(&sb, "Groups: %s\n", strings.Join(g, ", ")) + } + + if full { + sb.WriteString("\n--- Full details ---\n") + fmt.Fprintf(&sb, "Public key: %s\n", p.GetPubKey()) + fmt.Fprintf(&sb, "Transfer rx/tx: %s / %s\n", + humanBytes(uint64(p.GetBytesRx())), humanBytes(uint64(p.GetBytesTx()))) + if eff := p.GetEffectiveRelayTimeoutSecs(); eff > 0 { + fmt.Fprintf(&sb, "Relay timeout: %d s (eff)\n", eff) + } + if eff := p.GetEffectiveP2PTimeoutSecs(); eff > 0 { + fmt.Fprintf(&sb, "P2P timeout: %d s (eff)\n", eff) + } + if eff := p.GetEffectiveP2PRetryMaxSecs(); eff > 0 { + fmt.Fprintf(&sb, "P2P retry-max: %d s (eff)\n", eff) + } + if local, remote := p.GetLocalIceCandidateType(), p.GetRemoteIceCandidateType(); local != "" || remote != "" { + fmt.Fprintf(&sb, "ICE 
candidate L/R: %s / %s\n", orDashStr(local), orDashStr(remote)) + } + if iceFails := p.GetIceBackoffFailures(); iceFails > 0 { + fmt.Fprintf(&sb, "ICE backoff fails: %d\n", iceFails) + } + // Codex finding 4: the daemon snapshot only refreshes on ICE + // state-change events, so IceBackoffSuspended stays true even + // after nextRetry has passed by wall-clock. Mirror the CLI's + // (status.go:797) wall-clock check so we don't display a stale + // "SUSPENDED" hours after the cool-down actually expired. + if nr := p.GetIceBackoffNextRetry(); nr != nil && nr.IsValid() { + next := nr.AsTime() + d := time.Until(next) + switch { + case p.GetIceBackoffSuspended() && d > 0: + fmt.Fprintf(&sb, "ICE backoff: suspended for %s (retry at %s)\n", + d.Round(time.Second), next.Format(time.RFC3339)) + case d > 0: + fmt.Fprintf(&sb, "ICE next retry: %s (in %s)\n", + next.Format(time.RFC3339), d.Round(time.Second)) + // d <= 0 (cool-down expired by wall-clock): suppress entirely. + } + } + } + return sb.String() +} + +func orDashStr(s string) string { + if s == "" { + return "-" + } + return s +} + +func peerLatencyStr(p *proto.PeerState) string { + lat := p.GetLatency() + if lat == nil { + return "-" + } + d := lat.AsDuration() + if d == 0 { + return "-" + } + return d.Round(time.Microsecond).String() +} + +func humanBytes(b uint64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := uint64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp]) +} diff --git a/docs/bugs/2026-05-04-user-peer-visibility-regression.md b/docs/bugs/2026-05-04-user-peer-visibility-regression.md new file mode 100644 index 00000000000..99c0ef34d17 --- /dev/null +++ b/docs/bugs/2026-05-04-user-peer-visibility-regression.md @@ -0,0 +1,98 @@ +# Upstream NetBird Regression: User Role Loses Visibility of Policy-Reachable Peers + +**Reported:** 2026-05-04 by Michael Uray 
("Georg sees only his own peer in the dashboard, not the Gegenstellen — that's not what we want, before each user saw their own peers PLUS their counterparts"). +**Affects:** Anyone running NetBird upstream commit `db44848e2` or later (merged 2026-04-28 in PR #6006 "[management] Drop netmap calculation on peer read"). +**Severity:** UX-breaking for any account using the `user` role -- those users see ONLY their own peers in the web dashboard, no longer the routing peers / counterparts their access policies allow them to reach. + +## What changed upstream + +PR #6006 simplified `GetPeers` and `GetPeer` in `management/server/peer.go`. The pre-PR code had two branches for a non-admin user: + + 1. Collect their OWN peers (filter by `peer.UserID == user.Id`). + 2. For each own peer, expand with peers reachable via account ACL + (`account.GetPeerConnectionResources(...)`), and merge the results. + Implemented in helper `getUserAccessiblePeers`. + +PR #6006 deleted step 2 + the helper. New code: + +```go +return am.Store.GetUserPeers(ctx, store.LockingStrengthNone, accountID, userID) +``` + +Same regression in `GetPeer` (single-peer detail) -- the helper +`checkIfUserOwnsPeer` was removed; it now returns `Internal` error +for any peer the user does not directly own. + +The PR title indicates the motivation: avoiding a netmap calculation +on every peer-list read because it was expensive on large accounts. +Trade-off was made WITHOUT replacing the visibility logic with +something cheaper, so the feature was simply lost. + +## Symptom on our deployment + +`georg.stoisser-gigacher` has 1 own peer (`ctb50-d`) and 17 auto_groups, +including 16 `*-access` groups that source policies into `*-Rx` / +`*-NW` destination groups (e.g. `Lunz.am.See.FWR-access` -> +`Lunz.am.See.FWR.Rx`). With the regressed code, the dashboard +shows only `ctb50-d`. Operationally useless -- the user wants to +see the routing peers their device can reach. 
+ +Switching the role to `auditor` works around the regression but +gives the user read access to ALL peers (including peers belonging +to other users' personal devices), which is too much. + +## Fix on this branch + +Branch: `fix/user-peer-visibility-restore` (NetBird repo) +Base: `730f9840c` (Phase-3.7i complete set) + +Restored both helpers: + +- `getUserAccessiblePeers(ctx, accountID, ownPeers)` -- expands the + own-peer list with ACL-reachable peers via + `account.GetPeerConnectionResources` (which is still in the + codebase, just no longer called from peer.go). +- `checkIfUserOwnsPeer(ctx, accountID, userID, peer)` -- per-peer + check: any of the user's own peers reaches `peer` via ACL? + +Code paths in `GetPeers` and `GetPeer` re-wired to call them. Both +functions match the upstream pre-#6006 behaviour byte-for-byte +modulo the surrounding code that was already refactored +(name/IP filter handling stays in the caller; signature uses +`ownPeers` directly to avoid a redundant `GetAccountPeers` call). + +## Verification + +After deploying the rebuilt mgmt image: + + curl -sk -H "Authorization: Token " \ + https://netbird.uplink.plant-control.net:44106/api/peers \ + | jq '. | length' + +Expected: more than 1 (own peer + every routing peer / Gegenstelle his +access policies allow him to reach), but LESS than 58 (he should NOT +see other users' personal peers). + +## Performance note + +PR #6006 was correct that `GetPeerConnectionResources` is not free. +Mitigations we keep in mind: + + - It is only called for users who hit the non-admin branch (typically + a small subset of total /api/peers requests -- admin tools call + with admin tokens). + - Each invocation walks `len(ownPeers)` policies. Most users have + 1-3 own peers, so cost stays low. + - `requestBuffer.GetAccountWithBackpressure` is the same call + upstream used. We add no new pressure beyond what the pre-#6006 + code already had. 
+ - For large accounts where this still hurts, a follow-up could cache + the user-id -> reachable-peer-id set per account snapshot, with + invalidation on policy / group / peer change. + +## Upstream relationship + +Worth filing this as an upstream issue / PR after we ship our fix +internally. Either the visibility loss was deliberate (then it's a +deliberate UX downgrade that should be documented) or it was an +oversight (then the helpers should come back, perhaps with a cache). diff --git a/docs/superpowers/plans/2026-04-06-kernel-ice-separate-port.md b/docs/superpowers/plans/2026-04-06-kernel-ice-separate-port.md new file mode 100644 index 00000000000..968006a7d3b --- /dev/null +++ b/docs/superpowers/plans/2026-04-06-kernel-ice-separate-port.md @@ -0,0 +1,115 @@ +# Kernel WireGuard ICE Separate Port Fix + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Fix P2P ICE connectivity in kernel WireGuard mode by using a separate UDP port for ICE instead of sharing port 51820 with the kernel WireGuard module via raw sockets. + +**Architecture:** Replace the `sharedsock` raw socket approach in `device_kernel_unix.go` with a standard UDP socket on a separate port (system-assigned). The UDPMux will use this dedicated socket for all ICE STUN traffic, while WireGuard keeps exclusive ownership of port 51820. This mirrors how the userspace mode works but without coupling ICE to the WireGuard bind. 
+ +**Tech Stack:** Go, Pion ICE, WireGuard kernel module, Linux UDP sockets + +--- + +### Task 1: Replace sharedsock with standard UDP socket in TunKernelDevice.Up() + +**Files:** +- Modify: `client/iface/device/device_kernel_unix.go:82-118` + +- [ ] **Step 1: Replace raw socket with standard UDP socket** + +Replace the `sharedsock.Listen()` call with a standard `net.ListenUDP()` on port 0 (system-assigned). Update the UDPMux creation to use this socket. + +In `device_kernel_unix.go`, replace the `Up()` method: + +```go +func (t *TunKernelDevice) Up() (*udpmux.UniversalUDPMuxDefault, error) { + if t.udpMux != nil { + return t.udpMux, nil + } + + if t.link == nil { + return nil, fmt.Errorf("device is not ready yet") + } + + log.Debugf("bringing up interface: %s", t.name) + + if err := t.link.up(); err != nil { + log.Errorf("error bringing up interface: %s", t.name) + return nil, err + } + + // Use a dedicated UDP socket for ICE instead of sharing the WireGuard port + // via raw sockets. The kernel WireGuard module owns port 51820 exclusively; + // attempting to share it via sharedsock causes ICE packets to never be sent. + udpConn, err := net.ListenUDP("udp4", &net.UDPAddr{Port: 0}) + if err != nil { + return nil, fmt.Errorf("listen udp for ICE: %w", err) + } + log.Infof("ICE using dedicated UDP port: %d (WireGuard kernel owns port %d)", udpConn.LocalAddr().(*net.UDPAddr).Port, t.wgPort) + + bindParams := udpmux.UniversalUDPMuxParams{ + UDPConn: nbnet.WrapPacketConn(udpConn), + Net: t.transportNet, + FilterFn: t.filterFn, + WGAddress: t.address, + MTU: t.mtu, + } + mux := udpmux.NewUniversalUDPMuxDefault(bindParams) + go mux.ReadFromConn(t.ctx) + t.udpMuxConn = udpConn + t.udpMux = mux + + log.Debugf("device is ready to use: %s", t.name) + return t.udpMux, nil +} +``` + +- [ ] **Step 2: Remove unused sharedsock import** + +Remove `"github.com/netbirdio/netbird/sharedsock"` from the imports in `device_kernel_unix.go` since it is no longer used. 
+ +- [ ] **Step 3: Build and verify compilation** + +Run: `cd /home/ai-agent/projects/netbird && GOOS=linux GOARCH=arm64 go build ./client/` +Expected: Successful build, no errors. + +- [ ] **Step 4: Cross-compile for arm64 (OpenWrt router)** + +```bash +cd /home/ai-agent/projects/netbird +GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -o /tmp/netbird-kernel-fix-arm64 ./client/ +``` + +- [ ] **Step 5: Deploy to test router and verify** + +```bash +# Stop NetBird, install kmod-wireguard, deploy new binary +ssh root@ 'killall -9 netbird; sleep 2' +cat /tmp/netbird-kernel-fix-arm64 | ssh root@ 'cat > /usr/bin/netbird && chmod +x /usr/bin/netbird' +ssh root@ 'apk add kmod-wireguard && modprobe wireguard' +ssh root@ '/etc/init.d/netbird restart' +``` + +Wait 20s, then verify: +- `netbird status` shows `Interface type: Kernel` +- Connection to same-LAN peer shows `Connection type: P2P` +- ICE candidate endpoints show LAN IPs (e.g. `192.168.91.x:NNNNN`) + +- [ ] **Step 6: Commit** + +```bash +git add client/iface/device/device_kernel_unix.go +git commit -m "fix(client): use separate UDP port for ICE in kernel WireGuard mode + +In kernel WireGuard mode, the WireGuard module exclusively owns UDP port +51820. The previous approach used a raw socket (sharedsock) to intercept +STUN packets on the same port, but this failed to send ICE connectivity +checks on some platforms (confirmed on OpenWrt/ARM64). + +Replace the shared raw socket with a dedicated UDP socket on a +system-assigned port. ICE STUN traffic now flows through this separate +port while WireGuard retains exclusive use of port 51820. + +This fixes P2P connections failing in kernel WireGuard mode, where all +peers would fall back to relay despite being on the same LAN." 
+``` diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index 36de950e9d7..a1418f7fd60 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -169,6 +169,9 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin return fmt.Errorf("failed to get account zones: %v", err) } + // Phase 3.7i: build once, share read-only across goroutines. + groupNamesByPeerID := grpc.BuildGroupNamesByPeerID(account.Groups) + for _, peer := range account.Peers { if !c.peersUpdateManager.HasChannel(peer.ID) { log.WithContext(ctx).Tracef("peer %s doesn't have a channel, skipping network map update", peer.ID) @@ -203,7 +206,7 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin peerGroups := account.GetPeerGroups(p.ID) start = time.Now() - update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, p, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSetting, maps.Keys(peerGroups), dnsFwdPort) + update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, p, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSetting, maps.Keys(peerGroups), dnsFwdPort, groupNamesByPeerID) c.metrics.CountToSyncResponseDuration(time.Since(start)) c.peersUpdateManager.SendUpdate(ctx, p.ID, &network_map.UpdateMessage{ @@ -324,8 +327,10 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe peerGroups := account.GetPeerGroups(peerId) dnsFwdPort := computeForwarderPort(maps.Values(account.Peers), network_map.DnsForwarderPortMinVersion) + // Phase 3.7i: build group names map for remote-peer annotations. 
+ groupNamesByPeerID := grpc.BuildGroupNamesByPeerID(account.Groups) - update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, peer, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSettings, maps.Keys(peerGroups), dnsFwdPort) + update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, peer, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSettings, maps.Keys(peerGroups), dnsFwdPort, groupNamesByPeerID) c.peersUpdateManager.SendUpdate(ctx, peer.ID, &network_map.UpdateMessage{ Update: update, MessageType: network_map.MessageTypeNetworkMap, diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index f2ab0a2c4df..d55b6a7f38b 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -5,7 +5,6 @@ package server import ( "context" "crypto/tls" - "net/http" "net/netip" "slices" "time" @@ -30,6 +29,7 @@ import ( nbcache "github.com/netbirdio/netbird/management/server/cache" nbContext "github.com/netbirdio/netbird/management/server/context" nbhttp "github.com/netbirdio/netbird/management/server/http" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/netbird/management/server/http/middleware" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/telemetry" @@ -108,9 +108,23 @@ func (s *BaseServer) EventStore() activity.Store { }) } -func (s *BaseServer) APIHandler() http.Handler { - return Create(s, func() http.Handler { - httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), 
s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies, s.RateLimiter()) +// PeerConnStore returns the shared in-memory peer-connection-map store. +// Phase 3.7i of #5989: constructed once, shared between gRPC and HTTP servers. +func (s *BaseServer) PeerConnStore() peer_connections.Store { + return Create(s, func() peer_connections.Store { + return peer_connections.NewMemoryStore(1 * time.Hour) + }) +} + +// PeerConnRouter returns the shared SnapshotRouter. +// Phase 3.7i of #5989: constructed once, shared between gRPC and HTTP servers. +func (s *BaseServer) PeerConnRouter() *peer_connections.SnapshotRouter { + return Create(s, peer_connections.NewSnapshotRouter) +} + +func (s *BaseServer) APIHandler() *nbhttp.APIHandler { + return Create(s, func() *nbhttp.APIHandler { + httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies, s.RateLimiter(), s.PeerConnStore(), s.PeerConnRouter()) if err != nil { log.Fatalf("failed to create API handler: %v", err) } @@ -173,7 +187,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server { } gRPCAPIHandler := grpc.NewServer(gRPCOpts...) 
- srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider(), s.SessionStore()) + srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider(), s.SessionStore(), s.PeerConnStore(), s.PeerConnRouter()) if err != nil { log.Fatalf("failed to create management server: %v", err) } diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index ef417d3cfb5..b9a772c88c9 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -4,11 +4,15 @@ import ( "context" "fmt" "net/url" + "slices" + "sort" "strings" log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/timestamppb" integrationsConfig "github.com/netbirdio/management-integrations/integrations/config" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" @@ -22,6 +26,11 @@ import ( "github.com/netbirdio/netbird/shared/sshauth" ) +// p2pRetryMaxDisabledSentinel is the wire-format value that signals +// "user-explicit disable backoff" (uint32-max). The 0 wire-value is +// reserved for "not set, use daemon default". Phase 3 of #5989. +const p2pRetryMaxDisabledSentinel = ^uint32(0) + func toNetbirdConfig(config *nbconfig.Config, turnCredentials *Token, relayToken *Token, extraSettings *types.ExtraSettings) *proto.NetbirdConfig { if config == nil { return nil @@ -100,12 +109,69 @@ func toPeerConfig(peer *nbpeer.Peer, network *types.Network, dnsName string, set sshConfig.JwtConfig = buildJWTConfig(httpConfig, deviceFlowConfig) } + // Resolve the effective ConnectionMode for this peer. 
+ // Phase 1: account-wide settings only (per-peer / per-group resolution + // follows in Phase 3 / issue #5990). The new ConnectionMode field wins + // over the legacy LazyConnectionEnabled boolean. UNSPECIFIED in Settings + // (i.e. ConnectionMode == nil) falls back to the legacy bool. + resolvedMode := connectionmode.ResolveLegacyLazyBool(settings.LazyConnectionEnabled) + if settings.ConnectionMode != nil { + if m, err := connectionmode.ParseString(*settings.ConnectionMode); err == nil && m != connectionmode.ModeUnspecified { + resolvedMode = m + } + } + + relayTO := uint32(0) + if settings.RelayTimeoutSeconds != nil { + relayTO = *settings.RelayTimeoutSeconds + } + p2pTO := uint32(0) + if settings.P2pTimeoutSeconds != nil { + p2pTO = *settings.P2pTimeoutSeconds + } + p2pRetryMax := uint32(0) + if settings.P2pRetryMaxSeconds != nil { + if *settings.P2pRetryMaxSeconds == 0 { + p2pRetryMax = p2pRetryMaxDisabledSentinel + } else { + p2pRetryMax = *settings.P2pRetryMaxSeconds + } + } + + // Phase 3.7i (#5989): legacy-client compatibility for p2p-dynamic mode. + // Clients that do NOT advertise the "p2p_dynamic" capability cannot + // honour the new ConnectionMode enum (proto3 default behaviour: they + // just ignore the unknown enum value). To give them deterministic and + // network-friendly behaviour, downgrade the per-peer config to + // p2p-lazy with the admin-configured fallback timeout. The toggle + // defaults to ON; admins who know their entire fleet is on a 3.7i+ + // build can disable it to send raw p2p-dynamic to everyone. + if resolvedMode == connectionmode.ModeP2PDynamic && settings.LegacyLazyFallbackEnabled { + if !slices.Contains(peer.Meta.SupportedFeatures, "p2p_dynamic") { + resolvedMode = connectionmode.ModeP2PLazy + relayTO = settings.LegacyLazyFallbackTimeoutSeconds + // p2pTO and p2pRetryMax stay as configured -- p2p-lazy mode + // doesn't drive ICE-worker tear-down, so they are inert for + // legacy clients. 
Leaving them populated keeps the wire + // payload identical for every peer (cache-friendly) and + // avoids surprising future modes that might consume them. + } + } + return &proto.PeerConfig{ Address: fmt.Sprintf("%s/%d", peer.IP.String(), netmask), SshConfig: sshConfig, Fqdn: fqdn, RoutingPeerDnsResolutionEnabled: settings.RoutingPeerDNSResolutionEnabled, - LazyConnectionEnabled: settings.LazyConnectionEnabled, + // Send BOTH the new enum (for new clients) and the legacy boolean + // (for old clients). New clients prefer the explicit enum and + // ignore the bool; old clients ignore the unknown enum field + // (proto3 default behaviour) and fall back to the bool. + LazyConnectionEnabled: resolvedMode.ToLazyConnectionEnabled(), + ConnectionMode: resolvedMode.ToProto(), + P2PTimeoutSeconds: p2pTO, + P2PRetryMaxSeconds: p2pRetryMax, + RelayTimeoutSeconds: relayTO, AutoUpdate: &proto.AutoUpdateSettings{ Version: settings.AutoUpdateVersion, AlwaysUpdate: settings.AutoUpdateAlways, @@ -113,7 +179,7 @@ func toPeerConfig(peer *nbpeer.Peer, network *types.Network, dnsName string, set } } -func ToSyncResponse(ctx context.Context, config *nbconfig.Config, httpConfig *nbconfig.HttpServerConfig, deviceFlowConfig *nbconfig.DeviceAuthorizationFlow, peer *nbpeer.Peer, turnCredentials *Token, relayCredentials *Token, networkMap *types.NetworkMap, dnsName string, checks []*posture.Checks, dnsCache *cache.DNSConfigCache, settings *types.Settings, extraSettings *types.ExtraSettings, peerGroups []string, dnsFwdPort int64) *proto.SyncResponse { +func ToSyncResponse(ctx context.Context, config *nbconfig.Config, httpConfig *nbconfig.HttpServerConfig, deviceFlowConfig *nbconfig.DeviceAuthorizationFlow, peer *nbpeer.Peer, turnCredentials *Token, relayCredentials *Token, networkMap *types.NetworkMap, dnsName string, checks []*posture.Checks, dnsCache *cache.DNSConfigCache, settings *types.Settings, extraSettings *types.ExtraSettings, peerGroups []string, dnsFwdPort int64, 
groupNamesByPeerID map[string][]string) *proto.SyncResponse { response := &proto.SyncResponse{ PeerConfig: toPeerConfig(peer, networkMap.Network, dnsName, settings, httpConfig, deviceFlowConfig, networkMap.EnableSSH), NetworkMap: &proto.NetworkMap{ @@ -131,14 +197,20 @@ func ToSyncResponse(ctx context.Context, config *nbconfig.Config, httpConfig *nb response.NetworkMap.PeerConfig = response.PeerConfig + appendCtx := AppendRemotePeerConfigContext{ + DNSDomain: dnsName, + Cfg: settings, + GroupNamesByPeerID: groupNamesByPeerID, + } + remotePeers := make([]*proto.RemotePeerConfig, 0, len(networkMap.Peers)+len(networkMap.OfflinePeers)) - remotePeers = appendRemotePeerConfig(remotePeers, networkMap.Peers, dnsName) + remotePeers = appendRemotePeerConfig(remotePeers, networkMap.Peers, appendCtx) response.RemotePeers = remotePeers response.NetworkMap.RemotePeers = remotePeers response.RemotePeersIsEmpty = len(remotePeers) == 0 response.NetworkMap.RemotePeersIsEmpty = response.RemotePeersIsEmpty - response.NetworkMap.OfflinePeers = appendRemotePeerConfig(nil, networkMap.OfflinePeers, dnsName) + response.NetworkMap.OfflinePeers = appendRemotePeerConfig(nil, networkMap.OfflinePeers, appendCtx) firewallRules := toProtocolFirewallRules(networkMap.FirewallRules) response.NetworkMap.FirewallRules = firewallRules @@ -195,19 +267,105 @@ func buildAuthorizedUsersProto(ctx context.Context, authorizedUsers map[string]m return hashedUsers, machineUsers } -func appendRemotePeerConfig(dst []*proto.RemotePeerConfig, peers []*nbpeer.Peer, dnsName string) []*proto.RemotePeerConfig { +// AppendRemotePeerConfigContext bundles per-account settings + per-peer +// group lookups so appendRemotePeerConfig stays free of DB calls. +// Callers (in conversion.go) materialise this once per NetworkMap build. +type AppendRemotePeerConfigContext struct { + DNSDomain string + // Cfg is the account-wide configured mode/timeouts. Nil when unavailable. 
+ Cfg *types.Settings + // GroupNamesByPeerID maps a peer ID to its sorted group-name list. + GroupNamesByPeerID map[string][]string +} + +func appendRemotePeerConfig(dst []*proto.RemotePeerConfig, peers []*nbpeer.Peer, c AppendRemotePeerConfigContext) []*proto.RemotePeerConfig { + var cfgConnMode string + var cfgRelayTO, cfgP2pTO, cfgP2pRetryMax uint32 + if c.Cfg != nil { + cfgConnMode = derefStringOrEmpty(c.Cfg.ConnectionMode) + cfgRelayTO = derefUint32OrZero(c.Cfg.RelayTimeoutSeconds) + cfgP2pTO = derefUint32OrZero(c.Cfg.P2pTimeoutSeconds) + cfgP2pRetryMax = derefUint32OrZero(c.Cfg.P2pRetryMaxSeconds) + } + for _, rPeer := range peers { - dst = append(dst, &proto.RemotePeerConfig{ - WgPubKey: rPeer.Key, - AllowedIps: []string{rPeer.IP.String() + "/32"}, - SshConfig: &proto.SSHConfig{SshPubKey: []byte(rPeer.SSHKey)}, - Fqdn: rPeer.FQDN(dnsName), + cfg := &proto.RemotePeerConfig{ + WgPubKey: rPeer.Key, + AllowedIps: []string{rPeer.IP.String() + "/32"}, + SshConfig: &proto.SSHConfig{SshPubKey: []byte(rPeer.SSHKey)}, + Fqdn: rPeer.FQDN(c.DNSDomain), + AgentVersion: rPeer.Meta.WtVersion, - }) + + // Phase 3.7i: effective values from the peer's last self-report. + EffectiveConnectionMode: rPeer.Meta.EffectiveConnectionMode, + EffectiveRelayTimeoutSecs: rPeer.Meta.EffectiveRelayTimeoutSecs, + EffectiveP2PTimeoutSecs: rPeer.Meta.EffectiveP2PTimeoutSecs, + EffectiveP2PRetryMaxSecs: rPeer.Meta.EffectiveP2PRetryMaxSecs, + + // Phase 3.7i: account-wide configured values from Settings. + ConfiguredConnectionMode: cfgConnMode, + ConfiguredRelayTimeoutSecs: cfgRelayTO, + ConfiguredP2PTimeoutSecs: cfgP2pTO, + ConfiguredP2PRetryMaxSecs: cfgP2pRetryMax, + + // Phase 3.7i: server-knowledge fields surfaced to UIs. + Groups: c.GroupNamesByPeerID[rPeer.ID], + } + // nbpeer.Peer.Status is *PeerStatus; nil-guard before accessing. 
+ if rPeer.Status != nil { + if !rPeer.Status.LastSeen.IsZero() { + cfg.LastSeenAtServer = timestamppb.New(rPeer.Status.LastSeen) + } + cfg.LiveOnline = rPeer.Status.Connected + } + // New servers always know per-peer liveness; signal that to new + // clients so they can trust LiveOnline directly instead of + // guessing from the LastSeenAtServer-zero heuristic. Old servers + // leave this field at default (false) and clients fall back. + cfg.ServerLivenessKnown = true + dst = append(dst, cfg) } return dst } +// derefStringOrEmpty returns the pointed-to string or "" for nil. +// Used for *string Settings fields where "" means "account hasn't +// configured a mode; UI shows it as unset". +func derefStringOrEmpty(s *string) string { + if s == nil { + return "" + } + return *s +} + +// derefUint32OrZero returns the pointed-to uint32 or 0 for nil. +// Used for *uint32 Settings fields where 0 means "account hasn't set +// an override; daemon falls back to its built-in default". +func derefUint32OrZero(u *uint32) uint32 { + if u == nil { + return 0 + } + return *u +} + +// BuildGroupNamesByPeerID constructs a peerID → sorted-group-names map +// from the account's Groups in a single pass. Callers pass this to +// ToSyncResponse so that appendRemotePeerConfig can annotate each +// RemotePeerConfig.Groups without any additional DB calls. 
+func BuildGroupNamesByPeerID(groups map[string]*types.Group) map[string][]string { + result := make(map[string][]string, len(groups)) + for _, g := range groups { + for _, peerID := range g.Peers { + result[peerID] = append(result[peerID], g.Name) + } + } + for peerID := range result { + sort.Strings(result[peerID]) + } + return result +} + // toProtocolDNSConfig converts nbdns.Config to proto.DNSConfig using the cache func toProtocolDNSConfig(update nbdns.Config, cache *cache.DNSConfigCache, forwardPort int64) *proto.DNSConfig { protoUpdate := &proto.DNSConfig{ diff --git a/management/internals/shared/grpc/conversion_test.go b/management/internals/shared/grpc/conversion_test.go index 1e75caf959a..7eddff39ff2 100644 --- a/management/internals/shared/grpc/conversion_test.go +++ b/management/internals/shared/grpc/conversion_test.go @@ -2,6 +2,7 @@ package grpc import ( "fmt" + "net" "net/netip" "reflect" "testing" @@ -12,8 +13,270 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller/cache" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/types" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) +// TestToPeerConfig_ConnectionModeResolution covers Phase 1 of issue #5989: +// the management server resolves the effective ConnectionMode from +// Settings (with the new ConnectionMode field winning over the legacy +// LazyConnectionEnabled boolean), then writes BOTH wire fields so old +// clients (boolean only) and new clients (enum only) see consistent +// behaviour. 
+func TestToPeerConfig_ConnectionModeResolution(t *testing.T) { + cases := []struct { + name string + settingsMode *string + settingsLazyBool bool + settingsRelayTO *uint32 + settingsP2pTO *uint32 + wantPCMode mgmProto.ConnectionMode + wantPCLazyBool bool + wantPCRelayTO uint32 + wantPCP2pTO uint32 + }{ + { + name: "no settings -> P2P + lazy=false", + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + wantPCLazyBool: false, + }, + { + name: "only legacy lazy=true -> P2P_LAZY + lazy=true", + settingsLazyBool: true, + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + { + name: "ConnectionMode=p2p-lazy explicit -> P2P_LAZY + lazy=true", + settingsMode: strPtrTest("p2p-lazy"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + { + name: "ConnectionMode=p2p explicit -> P2P + lazy=false", + settingsMode: strPtrTest("p2p"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + wantPCLazyBool: false, + }, + { + name: "ConnectionMode=relay-forced -> RELAY_FORCED + lazy=false (structural compat gap)", + settingsMode: strPtrTest("relay-forced"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + wantPCLazyBool: false, + }, + { + name: "ConnectionMode wins over conflicting legacy bool", + settingsMode: strPtrTest("relay-forced"), + settingsLazyBool: true, // ignored + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + wantPCLazyBool: false, + }, + { + name: "RelayTimeout propagates", + settingsMode: strPtrTest("p2p-lazy"), + settingsRelayTO: u32PtrTest(42), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + wantPCRelayTO: 42, + }, + { + name: "P2pTimeout propagates", + settingsMode: strPtrTest("p2p-dynamic"), + settingsP2pTO: u32PtrTest(180), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + wantPCLazyBool: false, // p2p-dynamic maps to lazy=false (best-match for old clients) + 
wantPCP2pTO: 180, + }, + { + name: "Garbage in ConnectionMode falls back to legacy bool", + settingsMode: strPtrTest("not-a-mode"), + settingsLazyBool: true, + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + } + + // Minimal Network and Peer fixtures shared across cases. + _, ipnet, _ := net.ParseCIDR("10.0.0.0/16") + network := &types.Network{Net: *ipnet} + peer := &nbpeer.Peer{ + ID: "p1", + Name: "test-peer", + DNSLabel: "test-peer", + IP: net.IPv4(10, 0, 0, 5), + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + settings := &types.Settings{ + LazyConnectionEnabled: c.settingsLazyBool, + ConnectionMode: c.settingsMode, + RelayTimeoutSeconds: c.settingsRelayTO, + P2pTimeoutSeconds: c.settingsP2pTO, + } + pc := toPeerConfig(peer, network, "example.local", settings, nil, nil, false) + + assert.Equal(t, c.wantPCMode, pc.GetConnectionMode(), + "ConnectionMode wire field") + assert.Equal(t, c.wantPCLazyBool, pc.GetLazyConnectionEnabled(), + "LazyConnectionEnabled wire field (backwards-compat)") + assert.Equal(t, c.wantPCRelayTO, pc.GetRelayTimeoutSeconds(), + "RelayTimeoutSeconds wire field") + assert.Equal(t, c.wantPCP2pTO, pc.GetP2PTimeoutSeconds(), + "P2PTimeoutSeconds wire field") + }) + } +} + +func strPtrTest(s string) *string { return &s } +func u32PtrTest(v uint32) *uint32 { return &v } + +// toPeerConfigForTest is a minimal helper that calls toPeerConfig with a +// fixed peer and network fixture, forwarding only the settings argument. +// Used by the P2pRetryMaxSeconds sentinel tests (Phase 3 / #5989). 
+func toPeerConfigForTest(settings *types.Settings) *mgmProto.PeerConfig { + _, ipnet, _ := net.ParseCIDR("10.0.0.0/16") + network := &types.Network{Net: *ipnet} + peer := &nbpeer.Peer{ + ID: "p1", + Name: "test-peer", + DNSLabel: "test-peer", + IP: net.IPv4(10, 0, 0, 5), + } + return toPeerConfig(peer, network, "example.local", settings, nil, nil, false) +} + +func TestToPeerConfig_P2pRetryMax_NullDB(t *testing.T) { + settings := &types.Settings{ + P2pRetryMaxSeconds: nil, // DB has NULL + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != 0 { + t.Errorf("NULL in DB should produce 0 on the wire (= use daemon default), got %d", pc.P2PRetryMaxSeconds) + } +} + +func TestToPeerConfig_P2pRetryMax_ExplicitDisable(t *testing.T) { + zero := uint32(0) + settings := &types.Settings{ + P2pRetryMaxSeconds: &zero, // user explicitly set 0 + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != ^uint32(0) { + t.Errorf("explicit 0 should map to uint32-max sentinel on the wire, got %d", pc.P2PRetryMaxSeconds) + } +} + +func TestToPeerConfig_P2pRetryMax_NormalValue(t *testing.T) { + v := uint32(600) + settings := &types.Settings{ + P2pRetryMaxSeconds: &v, + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != 600 { + t.Errorf("expected 600 on the wire, got %d", pc.P2PRetryMaxSeconds) + } +} + +// Phase 3.7i (#5989): legacy-client capability fallback. Clients that do +// not advertise the "p2p_dynamic" capability in PeerSystemMeta must be +// downgraded to p2p-lazy when the account ConnectionMode is p2p-dynamic +// and the LegacyLazyFallbackEnabled toggle is on. Clients that DO +// advertise the capability must pass through unchanged. + +// toPeerConfigWithFeatures builds a peer with the given supported_features +// list and returns the resolved PeerConfig with the supplied settings. 
+func toPeerConfigWithFeatures(settings *types.Settings, features []string) *mgmProto.PeerConfig { + _, ipnet, _ := net.ParseCIDR("10.0.0.0/16") + network := &types.Network{Net: *ipnet} + peer := &nbpeer.Peer{ + ID: "p1", + Name: "test-peer", + DNSLabel: "test-peer", + IP: net.IPv4(10, 0, 0, 5), + Meta: nbpeer.PeerSystemMeta{ + SupportedFeatures: features, + }, + } + return toPeerConfig(peer, network, "example.local", settings, nil, nil, false) +} + +func TestToPeerConfig_LegacyFallback_LegacyClient_GetsLazyDowngrade(t *testing.T) { + rt := uint32(300) + settings := &types.Settings{ + ConnectionMode: strPtrTest("p2p-dynamic"), + RelayTimeoutSeconds: &rt, + LegacyLazyFallbackEnabled: true, + LegacyLazyFallbackTimeoutSeconds: 3600, + } + pc := toPeerConfigWithFeatures(settings, nil) // legacy: no capability advertised + + if pc.GetConnectionMode() != mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY { + t.Errorf("legacy client should get P2P_LAZY, got %v", pc.GetConnectionMode()) + } + if !pc.GetLazyConnectionEnabled() { + t.Error("legacy client should have LazyConnectionEnabled=true") + } + if pc.GetRelayTimeoutSeconds() != 3600 { + t.Errorf("legacy client should get LegacyLazyFallbackTimeoutSeconds=3600, got %d", pc.GetRelayTimeoutSeconds()) + } +} + +func TestToPeerConfig_LegacyFallback_NewClient_PassesThrough(t *testing.T) { + rt := uint32(300) + settings := &types.Settings{ + ConnectionMode: strPtrTest("p2p-dynamic"), + RelayTimeoutSeconds: &rt, + LegacyLazyFallbackEnabled: true, + LegacyLazyFallbackTimeoutSeconds: 3600, + } + pc := toPeerConfigWithFeatures(settings, []string{"p2p_dynamic"}) + + if pc.GetConnectionMode() != mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC { + t.Errorf("new client should keep P2P_DYNAMIC, got %v", pc.GetConnectionMode()) + } + if pc.GetRelayTimeoutSeconds() != 300 { + t.Errorf("new client should get account RelayTimeoutSeconds=300, got %d", pc.GetRelayTimeoutSeconds()) + } +} + +func 
TestToPeerConfig_LegacyFallback_DisabledToggle_LegacyClientGetsRawDynamic(t *testing.T) { + rt := uint32(300) + settings := &types.Settings{ + ConnectionMode: strPtrTest("p2p-dynamic"), + RelayTimeoutSeconds: &rt, + LegacyLazyFallbackEnabled: false, // admin opted out + LegacyLazyFallbackTimeoutSeconds: 3600, // unused + } + pc := toPeerConfigWithFeatures(settings, nil) // legacy + + if pc.GetConnectionMode() != mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC { + t.Errorf("with toggle OFF, legacy client should still get raw P2P_DYNAMIC (admin choice), got %v", pc.GetConnectionMode()) + } + if pc.GetRelayTimeoutSeconds() != 300 { + t.Errorf("toggle OFF: should keep account RelayTimeoutSeconds=300, got %d", pc.GetRelayTimeoutSeconds()) + } +} + +func TestToPeerConfig_LegacyFallback_NonDynamicMode_NoOverride(t *testing.T) { + rt := uint32(300) + settings := &types.Settings{ + ConnectionMode: strPtrTest("p2p-lazy"), // not p2p-dynamic + RelayTimeoutSeconds: &rt, + LegacyLazyFallbackEnabled: true, + LegacyLazyFallbackTimeoutSeconds: 3600, + } + pc := toPeerConfigWithFeatures(settings, nil) // legacy client + + if pc.GetConnectionMode() != mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY { + t.Errorf("legacy client in p2p-lazy mode should pass through (no override), got %v", pc.GetConnectionMode()) + } + if pc.GetRelayTimeoutSeconds() != 300 { + t.Errorf("non-dynamic mode: should use account RelayTimeoutSeconds=300, got %d", pc.GetRelayTimeoutSeconds()) + } +} + func TestToProtocolDNSConfigWithCache(t *testing.T) { var cache cache.DNSConfigCache diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 0c1611e7f61..aa68bb748d8 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -31,6 +31,7 @@ import ( nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/idp" 
"github.com/netbirdio/netbird/management/server/job" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/store" @@ -86,6 +87,10 @@ type Server struct { reverseProxyManager rpservice.Manager reverseProxyMu sync.RWMutex + + // Phase 3.7i of #5989: shared peer-connection-map state + peerConnections peer_connections.Store + snapshotRouter *peer_connections.SnapshotRouter } // NewServer creates a new Management server @@ -101,7 +106,19 @@ func NewServer( networkMapController network_map.Controller, oAuthConfigProvider idp.OAuthConfigProvider, sessionStore *auth.SessionStore, + peerConnStore peer_connections.Store, + peerConnRouter *peer_connections.SnapshotRouter, ) (*Server, error) { + // Defensive defaults for Phase 3.7i wiring: production callers pass + // non-nil values built by the BaseServer; some test fixtures pass + // nil. Without these the Sync handler nil-derefs in Register(). + if peerConnStore == nil { + peerConnStore = peer_connections.NewMemoryStore(5 * time.Minute) + } + if peerConnRouter == nil { + peerConnRouter = peer_connections.NewSnapshotRouter() + } + if appMetrics != nil { // update gauge based on number of connected peers which is equal to open gRPC streams err := appMetrics.GRPCMetrics().RegisterConnectedStreams(func() int64 { @@ -149,6 +166,9 @@ func NewServer( syncLim: syncLim, syncLimEnabled: syncLimEnabled, + + peerConnections: peerConnStore, + snapshotRouter: peerConnRouter, }, nil } @@ -422,6 +442,10 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg debouncer := NewUpdateDebouncer(1000 * time.Millisecond) defer debouncer.Stop() + // Phase 3.7i (#5989): register for SnapshotRequest dispatch. 
+ snapshotCh := s.snapshotRouter.Register(peerKey.String()) + defer s.snapshotRouter.Unregister(peerKey.String(), snapshotCh) + for { select { // condition when there are some updates @@ -466,6 +490,24 @@ log.WithContext(ctx).Debugf("stream of peer %s has been closed", peerKey.String()) s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return srv.Context().Err() + + // Phase 3.7i (#5989): NEW case — on-demand snapshot request. + // Bypasses the debouncer because dashboard refresh has a + // <3 s end-to-end latency budget. Direct sendUpdate. + case nonce, ok := <-snapshotCh: + if !ok { + // Channel closed by the router: nil it so this case stops firing. + snapshotCh = nil + continue + } + snapMsg := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{ + SnapshotRequest: &proto.PeerSnapshotRequest{Nonce: nonce}, + }, + } + if err := s.sendUpdate(ctx, accountID, peerKey, peer, snapMsg, srv, streamStartTime); err != nil { + log.WithContext(ctx).Warnf("send snapshot request to %s: %v", peerKey.String(), err) + } } } } @@ -681,7 +723,12 @@ func extractPeerMeta(ctx context.Context, meta *proto.PeerSystemMeta) nbpeer.Pee BlockInbound: meta.GetFlags().GetBlockInbound(), LazyConnectionEnabled: meta.GetFlags().GetLazyConnectionEnabled(), }, - Files: files, + Files: files, + EffectiveConnectionMode: meta.GetEffectiveConnectionMode(), + EffectiveRelayTimeoutSecs: meta.GetEffectiveRelayTimeoutSecs(), + EffectiveP2PTimeoutSecs: meta.GetEffectiveP2PTimeoutSecs(), + EffectiveP2PRetryMaxSecs: meta.GetEffectiveP2PRetryMaxSecs(), + SupportedFeatures: meta.GetSupportedFeatures(), } } @@ -921,7 +968,19 @@ func (s *Server) sendInitialSync(ctx context.Context, peerKey wgtypes.Key, peer return status.Errorf(codes.Internal, "failed to get peer groups %s", err) } - plainResp := ToSyncResponse(ctx, s.config, s.config.HttpConfig, s.config.DeviceAuthorizationFlow, peer, turnToken, relayToken, networkMap, s.networkMapController.GetDNSDomain(settings), postureChecks, nil, settings, 
settings.Extra, peerGroups, dnsFwdPort) + // Phase 3.7i: build group-names map for RemotePeerConfig annotations. + accountGroups, err := s.accountManager.GetStore().GetAccountGroups(ctx, store.LockingStrengthNone, peer.AccountID) + if err != nil { + log.WithContext(ctx).Warnf("failed to get account groups for peer %s: %v", peer.ID, err) + accountGroups = nil + } + groupsMap := make(map[string]*types.Group, len(accountGroups)) + for _, g := range accountGroups { + groupsMap[g.ID] = g + } + groupNamesByPeerID := BuildGroupNamesByPeerID(groupsMap) + + plainResp := ToSyncResponse(ctx, s.config, s.config.HttpConfig, s.config.DeviceAuthorizationFlow, peer, turnToken, relayToken, networkMap, s.networkMapController.GetDNSDomain(settings), postureChecks, nil, settings, settings.Extra, peerGroups, dnsFwdPort, groupNamesByPeerID) key, err := s.secretsManager.GetWGKey() if err != nil { @@ -1122,6 +1179,22 @@ func (s *Server) SyncMeta(ctx context.Context, req *proto.EncryptedMessage) (*pr return &proto.Empty{}, nil } +// SyncPeerConnections receives a per-peer connection map from a peer. +// Phase 3.7i of #5989. Mirrors SyncMeta's parseRequest pattern: +// decrypts the EncryptedMessage envelope, authenticates the peer pubkey, +// stores the decoded PeerConnectionMap under that pubkey. 
+func (s *Server) SyncPeerConnections(ctx context.Context, req *proto.EncryptedMessage) (*proto.Empty, error) { + pcm := &proto.PeerConnectionMap{} + peerKey, err := s.parseRequest(ctx, req, pcm) + if err != nil { + return nil, err + } + if s.peerConnections != nil { + s.peerConnections.Put(peerKey.String(), pcm) + } + return &proto.Empty{}, nil +} + func (s *Server) Logout(ctx context.Context, req *proto.EncryptedMessage) (*proto.Empty, error) { log.WithContext(ctx).Debugf("Logout request from peer [%s]", req.WgPubKey) start := time.Now() diff --git a/management/server/account.go b/management/server/account.go index 4b71ab486eb..fe68cfd40ca 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -333,7 +333,13 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco oldSettings.LazyConnectionEnabled != newSettings.LazyConnectionEnabled || oldSettings.DNSDomain != newSettings.DNSDomain || oldSettings.AutoUpdateVersion != newSettings.AutoUpdateVersion || - oldSettings.AutoUpdateAlways != newSettings.AutoUpdateAlways { + oldSettings.AutoUpdateAlways != newSettings.AutoUpdateAlways || + !types.StringPtrEqual(oldSettings.ConnectionMode, newSettings.ConnectionMode) || + !types.Uint32PtrEqual(oldSettings.RelayTimeoutSeconds, newSettings.RelayTimeoutSeconds) || + !types.Uint32PtrEqual(oldSettings.P2pTimeoutSeconds, newSettings.P2pTimeoutSeconds) || + !types.Uint32PtrEqual(oldSettings.P2pRetryMaxSeconds, newSettings.P2pRetryMaxSeconds) || + oldSettings.LegacyLazyFallbackEnabled != newSettings.LegacyLazyFallbackEnabled || + oldSettings.LegacyLazyFallbackTimeoutSeconds != newSettings.LegacyLazyFallbackTimeoutSeconds { updateAccountPeers = true } @@ -371,6 +377,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco am.handleRoutingPeerDNSResolutionSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleLazyConnectionSettings(ctx, oldSettings, newSettings, userID, accountID) + 
am.handleConnectionModeSettings(ctx, oldSettings, newSettings, userID, accountID) am.handlePeerLoginExpirationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleGroupsPropagationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleAutoUpdateVersionSettings(ctx, oldSettings, newSettings, userID, accountID) @@ -455,6 +462,85 @@ } +// handleConnectionModeSettings emits one audit event per changed Phase-1 +// connection-mode setting (mode, relay/p2p timeouts, p2p retry max, and the legacy lazy-fallback toggle/timeout). Each event +// carries old/new values in the meta payload so administrators can audit +// the full transition. NULL transitions show as empty string / 0 in the +// meta — chosen over a sentinel so the frontend can render uniformly. +func (am *DefaultAccountManager) handleConnectionModeSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { + if !equalStringPtr(oldSettings.ConnectionMode, newSettings.ConnectionMode) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountConnectionModeChanged, map[string]any{ + "old": derefStringPtr(oldSettings.ConnectionMode), + "new": derefStringPtr(newSettings.ConnectionMode), + }) + } + if !equalUint32Ptr(oldSettings.RelayTimeoutSeconds, newSettings.RelayTimeoutSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountRelayTimeoutChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.RelayTimeoutSeconds), + "new": derefUint32Ptr(newSettings.RelayTimeoutSeconds), + }) + } + if !equalUint32Ptr(oldSettings.P2pTimeoutSeconds, newSettings.P2pTimeoutSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountP2pTimeoutChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.P2pTimeoutSeconds), + "new": derefUint32Ptr(newSettings.P2pTimeoutSeconds), + }) + } + if !equalUint32Ptr(oldSettings.P2pRetryMaxSeconds, newSettings.P2pRetryMaxSeconds) { + 
am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountP2pRetryMaxChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.P2pRetryMaxSeconds), + "new": derefUint32Ptr(newSettings.P2pRetryMaxSeconds), + }) + } + // Phase 3.7i (#5989): legacy-client lazy-fallback settings. + if oldSettings.LegacyLazyFallbackEnabled != newSettings.LegacyLazyFallbackEnabled { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountLegacyLazyFallbackEnabledChanged, map[string]any{ + "old": oldSettings.LegacyLazyFallbackEnabled, + "new": newSettings.LegacyLazyFallbackEnabled, + }) + } + if oldSettings.LegacyLazyFallbackTimeoutSeconds != newSettings.LegacyLazyFallbackTimeoutSeconds { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountLegacyLazyFallbackTimeoutChanged, map[string]any{ + "old": oldSettings.LegacyLazyFallbackTimeoutSeconds, + "new": newSettings.LegacyLazyFallbackTimeoutSeconds, + }) + } +} + +func equalStringPtr(a, b *string) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func equalUint32Ptr(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func derefStringPtr(p *string) string { + if p == nil { + return "" + } + return *p +} + +func derefUint32Ptr(p *uint32) uint32 { + if p == nil { + return 0 + } + return *p +} + func (am *DefaultAccountManager) handlePeerLoginExpirationSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { if oldSettings.PeerLoginExpirationEnabled != newSettings.PeerLoginExpirationEnabled { event := activity.AccountPeerLoginExpirationEnabled @@ -1900,6 +1986,14 @@ func newAccountWithId(ctx context.Context, accountID, userID, domain, email, nam PeerInactivityExpirationEnabled: false, PeerInactivityExpiration: types.DefaultPeerInactivityExpiration, RoutingPeerDNSResolutionEnabled: true, + // Phase 3.7i (#5989): 
legacy-fallback defaults must travel + // with newly-created accounts. The GORM `default:` only + // applies on SQL INSERT and would leave in-memory copies + // at false/0, which the conversion layer then interprets + // as "fallback disabled, timeout 0" -- a regression + // caught in code review. + LegacyLazyFallbackEnabled: types.DefaultLegacyLazyFallbackEnabled, + LegacyLazyFallbackTimeoutSeconds: types.DefaultLegacyLazyFallbackTimeoutSeconds, Extra: &types.ExtraSettings{ UserApprovalRequired: true, }, @@ -2009,6 +2103,10 @@ func (am *DefaultAccountManager) GetOrCreateAccountByPrivateDomain(ctx context.C PeerInactivityExpirationEnabled: false, PeerInactivityExpiration: types.DefaultPeerInactivityExpiration, RoutingPeerDNSResolutionEnabled: true, + // Phase 3.7i (#5989): same defaults as the primary + // NewAccount path -- see comment there for rationale. + LegacyLazyFallbackEnabled: types.DefaultLegacyLazyFallbackEnabled, + LegacyLazyFallbackTimeoutSeconds: types.DefaultLegacyLazyFallbackTimeoutSeconds, Extra: &types.ExtraSettings{ UserApprovalRequired: true, }, diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 626ed222dfb..7c779dbe49f 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -104,6 +104,10 @@ type Manager interface { GetDNSSettings(ctx context.Context, accountID string, userID string) (*types.DNSSettings, error) SaveDNSSettings(ctx context.Context, accountID string, userID string, dnsSettingsToSave *types.DNSSettings) error GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) + // GetPeerByPubKey returns the peer with the given WireGuard public key from + // the given account. Phase 3.7i of #5989 — used by REST handlers to enrich + // PeerConnectionMap entries with FQDNs. 
+ GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) UpdateAccountSettings(ctx context.Context, accountID, userID string, newSettings *types.Settings) (*types.Settings, error) UpdateAccountOnboarding(ctx context.Context, accountID, userID string, newOnboarding *types.AccountOnboarding) (*types.AccountOnboarding, error) LoginPeer(ctx context.Context, login types.PeerLogin) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) // used by peer gRPC API diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go index 8f3b22eccff..5f2721ff58e 100644 --- a/management/server/account/manager_mock.go +++ b/management/server/account/manager_mock.go @@ -900,6 +900,21 @@ func (mr *MockManagerMockRecorder) GetPeer(ctx, accountID, peerID, userID interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeer", reflect.TypeOf((*MockManager)(nil).GetPeer), ctx, accountID, peerID, userID) } +// GetPeerByPubKey mocks base method. Phase 3.7i of #5989. +func (m *MockManager) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerByPubKey", ctx, accountID, pubKey) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerByPubKey indicates an expected call of GetPeerByPubKey. +func (mr *MockManagerMockRecorder) GetPeerByPubKey(ctx, accountID, pubKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerByPubKey", reflect.TypeOf((*MockManager)(nil).GetPeerByPubKey), ctx, accountID, pubKey) +} + // GetPeerGroups mocks base method. 
func (m *MockManager) GetPeerGroups(ctx context.Context, accountID, peerID string) ([]*types.Group, error) { m.ctrl.T.Helper() diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index ddc3e00c38d..d9429718665 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -232,6 +232,25 @@ const ( // DomainValidated indicates that a custom domain was validated DomainValidated Activity = 120 + // AccountConnectionModeChanged indicates the account-wide ConnectionMode + // setting was changed (Phase 1 of issue #5989). + AccountConnectionModeChanged Activity = 121 + // AccountRelayTimeoutChanged indicates the account-wide RelayTimeoutSeconds + // setting was changed. + AccountRelayTimeoutChanged Activity = 122 + // AccountP2pTimeoutChanged indicates the account-wide P2pTimeoutSeconds + // setting was changed. + AccountP2pTimeoutChanged Activity = 123 + // AccountP2pRetryMaxChanged indicates the account-wide P2pRetryMaxSeconds + // setting was modified (Phase 3 of #5989). + AccountP2pRetryMaxChanged Activity = 124 + // AccountLegacyLazyFallbackEnabledChanged indicates the account-wide + // LegacyLazyFallbackEnabled toggle was changed (Phase 3.7i of #5989). + AccountLegacyLazyFallbackEnabledChanged Activity = 125 + // AccountLegacyLazyFallbackTimeoutChanged indicates the account-wide + // LegacyLazyFallbackTimeoutSeconds setting was changed (Phase 3.7i). 
+ AccountLegacyLazyFallbackTimeoutChanged Activity = 126 + AccountDeleted Activity = 99999 ) @@ -335,6 +354,13 @@ var activityMap = map[Activity]Code{ AccountLazyConnectionEnabled: {"Account lazy connection enabled", "account.setting.lazy.connection.enable"}, AccountLazyConnectionDisabled: {"Account lazy connection disabled", "account.setting.lazy.connection.disable"}, + AccountConnectionModeChanged: {"Account connection mode changed", "account.setting.connection_mode.change"}, + AccountRelayTimeoutChanged: {"Account relay timeout changed", "account.setting.relay_timeout.change"}, + AccountP2pTimeoutChanged: {"Account p2p timeout changed", "account.setting.p2p_timeout.change"}, + AccountP2pRetryMaxChanged: {"Account p2p retry max changed", "account.setting.p2p_retry_max.change"}, + AccountLegacyLazyFallbackEnabledChanged: {"Account legacy lazy-fallback toggle changed", "account.setting.legacy_lazy_fallback.toggle.change"}, + AccountLegacyLazyFallbackTimeoutChanged: {"Account legacy lazy-fallback timeout changed", "account.setting.legacy_lazy_fallback.timeout.change"}, + AccountNetworkRangeUpdated: {"Account network range updated", "account.network.range.update"}, PeerIPUpdated: {"Peer IP updated", "peer.ip.update"}, diff --git a/management/server/http/handler.go b/management/server/http/handler.go index b9ea605d36f..0c596238732 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -20,6 +20,7 @@ import ( nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" idpmanager "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/management-integrations/integrations" @@ -47,6 +48,7 @@ import ( "github.com/netbirdio/netbird/management/server/http/handlers/idp" "github.com/netbirdio/netbird/management/server/http/handlers/instance" "github.com/netbirdio/netbird/management/server/http/handlers/networks" + peer_connections_http 
"github.com/netbirdio/netbird/management/server/http/handlers/peer_connections" "github.com/netbirdio/netbird/management/server/http/handlers/peers" "github.com/netbirdio/netbird/management/server/http/handlers/policies" "github.com/netbirdio/netbird/management/server/http/handlers/routes" @@ -59,13 +61,25 @@ import ( nbnetworks "github.com/netbirdio/netbird/management/server/networks" "github.com/netbirdio/netbird/management/server/networks/resources" "github.com/netbirdio/netbird/management/server/networks/routers" + nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/telemetry" ) const apiPrefix = "/api" +// APIHandler wraps the HTTP router and holds shared state for all HTTP handlers. +// The peerConnections and snapshotRouter fields are constructed once in boot.go +// and shared with the gRPC server so both sides see the same in-memory state. +// Phase 3.7i of #5989; HTTP routes that consume these are registered in Task 4.2. +type APIHandler struct { + http.Handler + + peerConnections peer_connections.Store + snapshotRouter *peer_connections.SnapshotRouter +} + // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. 
-func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix, rateLimiter *middleware.APIRateLimiter) (http.Handler, error) { +func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix, rateLimiter *middleware.APIRateLimiter, peerConnStore peer_connections.Store, peerConnRouter *peer_connections.SnapshotRouter) (*APIHandler, error) { // Register bypass paths for unauthenticated endpoints if err := 
bypass.AddBypassPath("/api/instance"); err != nil { @@ -124,6 +138,16 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks accounts.AddEndpoints(accountManager, settingsManager, router) peers.AddEndpoints(accountManager, router, networkMapController, permissionsManager) + + // Phase 3.7i of #5989: peer connection-map REST routes. + peerConnHandler := peer_connections_http.NewHandler( + peerConnStore, + &pcAccountManagerAdapter{am: accountManager, nmc: networkMapController}, + peerConnRouter, + ) + router.HandleFunc("/peers/{peerId}/connections", peerConnHandler.GetPeerConnections).Methods("GET", "OPTIONS") + router.HandleFunc("/peers/{peerId}/connections/refresh", peerConnHandler.PostRefresh).Methods("POST", "OPTIONS") + users.AddEndpoints(accountManager, router) users.AddInvitesEndpoints(accountManager, router) users.AddPublicInvitesEndpoints(accountManager, router) @@ -155,5 +179,42 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks rootRouter.PathPrefix("/oauth2").Handler(corsMiddleware.Handler(embeddedIdP.Handler())) } - return rootRouter, nil + return &APIHandler{ + Handler: rootRouter, + peerConnections: peerConnStore, + snapshotRouter: peerConnRouter, + }, nil +} + +// pcAccountManagerAdapter bridges the real account.Manager into the small +// interface peer_connections.Handler uses. Phase 3.7i of #5989. +type pcAccountManagerAdapter struct { + am account.Manager + nmc network_map.Controller +} + +func (a *pcAccountManagerAdapter) GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) { + return a.am.GetPeer(ctx, accountID, peerID, userID) +} + +func (a *pcAccountManagerAdapter) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) { + return a.am.GetPeerByPubKey(ctx, accountID, pubKey) +} + +// GetDNSDomain resolves the configured DNS domain for the account. 
+// It reads the account settings and delegates to the networkMapController +// which applies the global default when the account has no custom domain. +// Falls back to "" on error — FQDN enrichment in the handler is best-effort. +func (a *pcAccountManagerAdapter) GetDNSDomain(ctx context.Context, accountID string) string { + settings, err := a.am.GetAccountSettings(ctx, accountID, "internal") + if err != nil { + return "" + } + if a.nmc == nil { + if settings != nil { + return settings.DNSDomain + } + return "" + } + return a.nmc.GetDNSDomain(settings) } diff --git a/management/server/http/handlers/accounts/accounts_handler.go b/management/server/http/handlers/accounts/accounts_handler.go index cc5567e3db6..bc071c097bf 100644 --- a/management/server/http/handlers/accounts/accounts_handler.go +++ b/management/server/http/handlers/accounts/accounts_handler.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "math" "net/http" "net/netip" "time" @@ -182,6 +183,18 @@ func (h *handler) updateAccountRequestSettings(req api.PutApiAccountsAccountIdJS PeerExposeEnabled: req.Settings.PeerExposeEnabled, PeerExposeGroups: req.Settings.PeerExposeGroups, + + // Phase 3.7i (#5989): seed the legacy-fallback fields with their + // semantic defaults BEFORE the per-field if-blocks below run. + // This handler always rebuilds Settings from scratch, so an + // API client (or older Dashboard) that omits the new fields + // would otherwise downgrade the account to false / 0 -- which + // the conversion layer interprets as "fallback disabled, + // timeout 0", the exact regression Codex flagged. The + // per-field if-blocks below override these defaults when the + // request explicitly sets a value (including false / explicit 0). 
+ LegacyLazyFallbackEnabled: types.DefaultLegacyLazyFallbackEnabled, + LegacyLazyFallbackTimeoutSeconds: types.DefaultLegacyLazyFallbackTimeoutSeconds, } if req.Settings.Extra != nil { @@ -215,6 +228,59 @@ func (h *handler) updateAccountRequestSettings(req api.PutApiAccountsAccountIdJS if req.Settings.LazyConnectionEnabled != nil { returnSettings.LazyConnectionEnabled = *req.Settings.LazyConnectionEnabled } + if req.Settings.ConnectionMode != nil { + modeStr := string(*req.Settings.ConnectionMode) + if !req.Settings.ConnectionMode.Valid() { + return nil, fmt.Errorf("invalid connection_mode %q", modeStr) + } + // Persist as the canonical string. Important: returnSettings + // is a fresh struct built from scratch by this handler -- if + // the request body omits connection_mode (or sets JSON null, + // which deserializes to a nil pointer), this whole block is + // skipped AND returnSettings.ConnectionMode stays nil, which + // the storage layer interprets as "clear the override". To + // preserve the existing value the caller must include the + // current value explicitly in the PUT body. This is also true + // for the four timeout fields below. 
+ s := modeStr + returnSettings.ConnectionMode = &s + } + if req.Settings.P2pTimeoutSeconds != nil { + v, err := validateUint32Timeout("p2p_timeout_seconds", *req.Settings.P2pTimeoutSeconds) + if err != nil { + return nil, err + } + returnSettings.P2pTimeoutSeconds = &v + } + if req.Settings.P2pRetryMaxSeconds != nil { + v, err := validateUint32Timeout("p2p_retry_max_seconds", *req.Settings.P2pRetryMaxSeconds) + if err != nil { + return nil, err + } + returnSettings.P2pRetryMaxSeconds = &v + } + if req.Settings.RelayTimeoutSeconds != nil { + v, err := validateUint32Timeout("relay_timeout_seconds", *req.Settings.RelayTimeoutSeconds) + if err != nil { + return nil, err + } + returnSettings.RelayTimeoutSeconds = &v + } + if req.Settings.LegacyLazyFallbackEnabled != nil { + returnSettings.LegacyLazyFallbackEnabled = *req.Settings.LegacyLazyFallbackEnabled + } + if req.Settings.LegacyLazyFallbackTimeoutSeconds != nil { + // Phase 3.7i (#5989): legacy fallback timeout. Range chosen to + // match the range an admin would plausibly set on a metered LTE + // fleet: 60s lower bound (anything shorter just hammers + // signaling), 86400s upper bound (24h - longer than that and the + // fallback is effectively "never tear down"). 
+ v := *req.Settings.LegacyLazyFallbackTimeoutSeconds + if v < 60 || v > 86400 { + return nil, fmt.Errorf("invalid legacy_lazy_fallback_timeout_seconds %d (must be between 60 and 86400)", v) + } + returnSettings.LegacyLazyFallbackTimeoutSeconds = uint32(v) + } if req.Settings.AutoUpdateVersion != nil { _, err := goversion.NewSemver(*req.Settings.AutoUpdateVersion) if *req.Settings.AutoUpdateVersion == autoUpdateLatestVersion || @@ -349,6 +415,55 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A PeerExposeEnabled: settings.PeerExposeEnabled, PeerExposeGroups: settings.PeerExposeGroups, LazyConnectionEnabled: &settings.LazyConnectionEnabled, + ConnectionMode: func() *api.AccountSettingsConnectionMode { + if settings.ConnectionMode == nil { + return nil + } + v := api.AccountSettingsConnectionMode(*settings.ConnectionMode) + return &v + }(), + P2pTimeoutSeconds: func() *int64 { + if settings.P2pTimeoutSeconds == nil { + return nil + } + v := int64(*settings.P2pTimeoutSeconds) + return &v + }(), + P2pRetryMaxSeconds: func() *int64 { + if settings.P2pRetryMaxSeconds == nil { + return nil + } + v := int64(*settings.P2pRetryMaxSeconds) + return &v + }(), + RelayTimeoutSeconds: func() *int64 { + if settings.RelayTimeoutSeconds == nil { + return nil + } + v := int64(*settings.RelayTimeoutSeconds) + return &v + }(), + // Phase 3.7i (#5989): expose the legacy-fallback fields with their + // semantic defaults filled in for accounts that pre-date the + // fields (zero-valued in DB). The conversion layer falls back + // to the same defaults at peer-config time, so reporting them + // here keeps API responses honest for the Dashboard and any + // other API consumer. 
+ LegacyLazyFallbackEnabled: func() *bool { + v := settings.LegacyLazyFallbackEnabled + if !v && settings.LegacyLazyFallbackTimeoutSeconds == 0 { + v = types.DefaultLegacyLazyFallbackEnabled + } + return &v + }(), + LegacyLazyFallbackTimeoutSeconds: func() *int64 { + to := settings.LegacyLazyFallbackTimeoutSeconds + if to == 0 { + to = types.DefaultLegacyLazyFallbackTimeoutSeconds + } + v := int64(to) + return &v + }(), DnsDomain: &settings.DNSDomain, AutoUpdateVersion: &settings.AutoUpdateVersion, AutoUpdateAlways: &settings.AutoUpdateAlways, @@ -386,3 +501,19 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A Onboarding: apiOnboarding, } } + +// validateUint32Timeout converts the int64 value coming from the API +// JSON body into a uint32 suitable for the daemon-internal timeout +// fields. Negative values and values larger than MaxUint32 are +// rejected (Codex review): a raw uint32 cast would silently wrap a +// negative input around to a large positive number, producing a +// timeout the operator never intended to set. 
+func validateUint32Timeout(name string, v int64) (uint32, error) { + if v < 0 { + return 0, fmt.Errorf("invalid %s: %d (must be >= 0)", name, v) + } + if v > int64(math.MaxUint32) { + return 0, fmt.Errorf("invalid %s: %d (exceeds %d)", name, v, uint64(math.MaxUint32)) + } + return uint32(v), nil +} diff --git a/management/server/http/handlers/accounts/accounts_handler_test.go b/management/server/http/handlers/accounts/accounts_handler_test.go index 739dfe2f655..42e4bfbf69c 100644 --- a/management/server/http/handlers/accounts/accounts_handler_test.go +++ b/management/server/http/handlers/accounts/accounts_handler_test.go @@ -78,6 +78,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { sr := func(v string) *string { return &v } br := func(v bool) *bool { return &v } + ir := func(v int64) *int64 { return &v } handler := initAccountsTestData(t, &types.Account{ Id: accountID, @@ -120,6 +121,8 @@ func TestAccounts_AccountsHandler(t *testing.T) { RegularUsersViewBlocked: true, RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), + LegacyLazyFallbackEnabled: br(true), + LegacyLazyFallbackTimeoutSeconds: ir(3600), DnsDomain: sr(""), AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), @@ -146,6 +149,8 @@ func TestAccounts_AccountsHandler(t *testing.T) { RegularUsersViewBlocked: false, RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), + LegacyLazyFallbackEnabled: br(true), + LegacyLazyFallbackTimeoutSeconds: ir(3600), DnsDomain: sr(""), AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), @@ -172,6 +177,8 @@ func TestAccounts_AccountsHandler(t *testing.T) { RegularUsersViewBlocked: false, RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), + LegacyLazyFallbackEnabled: br(true), + LegacyLazyFallbackTimeoutSeconds: ir(3600), DnsDomain: sr(""), AutoUpdateAlways: br(false), AutoUpdateVersion: sr("latest"), @@ -198,6 +205,8 @@ func TestAccounts_AccountsHandler(t *testing.T) { 
RegularUsersViewBlocked: true, RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), + LegacyLazyFallbackEnabled: br(true), + LegacyLazyFallbackTimeoutSeconds: ir(3600), DnsDomain: sr(""), AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), @@ -224,6 +233,8 @@ func TestAccounts_AccountsHandler(t *testing.T) { RegularUsersViewBlocked: true, RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), + LegacyLazyFallbackEnabled: br(true), + LegacyLazyFallbackTimeoutSeconds: ir(3600), DnsDomain: sr(""), AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), @@ -250,6 +261,8 @@ func TestAccounts_AccountsHandler(t *testing.T) { RegularUsersViewBlocked: true, RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), + LegacyLazyFallbackEnabled: br(true), + LegacyLazyFallbackTimeoutSeconds: ir(3600), DnsDomain: sr(""), AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), @@ -336,3 +349,81 @@ func TestAccounts_AccountsHandler(t *testing.T) { }) } } + +func TestAccountsHandler_PutSettings_P2pRetryMax(t *testing.T) { + accountID := "test_account" + adminUser := types.NewAdminUser("test_user") + + sr := func(v string) *string { return &v } + br := func(v bool) *bool { return &v } + ir := func(v int64) *int64 { return &v } + + handler := initAccountsTestData(t, &types.Account{ + Id: accountID, + Domain: "hotmail.com", + Network: types.NewNetwork(), + Users: map[string]*types.User{ + adminUser.Id: adminUser, + }, + Settings: &types.Settings{ + PeerLoginExpirationEnabled: false, + PeerLoginExpiration: time.Hour, + RegularUsersViewBlocked: false, + }, + }) + + recorder := httptest.NewRecorder() + req := httptest.NewRequest( + http.MethodPut, + "/api/accounts/"+accountID, + bytes.NewBufferString(`{"settings": {"peer_login_expiration": 3600, "peer_login_expiration_enabled": false, "p2p_retry_max_seconds": 600}, "onboarding": {"onboarding_flow_pending": true, "signup_form_pending": true}}`), + ) + req = 
nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: adminUser.Id, + AccountId: accountID, + Domain: "hotmail.com", + }) + + router := mux.NewRouter() + router.HandleFunc("/api/accounts/{accountId}", handler.updateAccount).Methods("PUT") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + if status := recorder.Code; status != http.StatusOK { + t.Fatalf("handler returned wrong status code: got %v want %v", status, http.StatusOK) + } + + content, err := io.ReadAll(res.Body) + if err != nil { + t.Fatalf("could not read response body: %v", err) + } + + var actual api.Account + if err = json.Unmarshal(content, &actual); err != nil { + t.Fatalf("response is not valid JSON: %v", err) + } + + expectedSettings := api.AccountSettings{ + PeerLoginExpiration: 3600, + PeerLoginExpirationEnabled: false, + GroupsPropagationEnabled: br(false), + JwtGroupsClaimName: sr(""), + JwtGroupsEnabled: br(false), + JwtAllowGroups: &[]string{}, + RegularUsersViewBlocked: false, + RoutingPeerDnsResolutionEnabled: br(false), + LazyConnectionEnabled: br(false), + LegacyLazyFallbackEnabled: br(true), + LegacyLazyFallbackTimeoutSeconds: ir(3600), + DnsDomain: sr(""), + AutoUpdateAlways: br(false), + AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), + P2pRetryMaxSeconds: ir(600), + } + + assert.Equal(t, expectedSettings, actual.Settings) +} diff --git a/management/server/http/handlers/accounts/validate_uint32_timeout_test.go b/management/server/http/handlers/accounts/validate_uint32_timeout_test.go new file mode 100644 index 00000000000..bd63c784b09 --- /dev/null +++ b/management/server/http/handlers/accounts/validate_uint32_timeout_test.go @@ -0,0 +1,90 @@ +package accounts + +import ( + "errors" + "math" + "strings" + "testing" +) + +// Codex review: validateUint32Timeout was added to fix the silent +// wrap-around when API JSON int64 fields landed in uint32 daemon +// fields. 
Make sure the boundary conditions stay covered. +func TestValidateUint32Timeout(t *testing.T) { + tests := []struct { + name string + input int64 + want uint32 + wantErr bool + }{ + {"zero", 0, 0, false}, + {"one", 1, 1, false}, + {"3h_typical_p2p", 10800, 10800, false}, + {"24h_typical_relay", 86400, 86400, false}, + {"max_uint32", int64(math.MaxUint32), math.MaxUint32, false}, + {"max_uint32_plus_one", int64(math.MaxUint32) + 1, 0, true}, + {"negative_one", -1, 0, true}, + {"negative_huge", -86400, 0, true}, + {"int64_max", math.MaxInt64, 0, true}, + {"int64_min", math.MinInt64, 0, true}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := validateUint32Timeout("test_field", tc.input) + if tc.wantErr { + if err == nil { + t.Fatalf("expected error for input %d, got nil", tc.input) + } + if !strings.Contains(err.Error(), "test_field") { + t.Errorf("error must mention field name, got: %v", err) + } + return + } + if err != nil { + t.Fatalf("unexpected error for input %d: %v", tc.input, err) + } + if got != tc.want { + t.Errorf("input %d: got %d, want %d", tc.input, got, tc.want) + } + }) + } +} + +// TestValidateUint32Timeout_ErrorMessageFormat verifies the error +// message includes both the field name and the offending value, so +// API clients see actionable feedback. 
+func TestValidateUint32Timeout_ErrorMessageFormat(t *testing.T) { + _, err := validateUint32Timeout("relay_timeout_seconds", -42) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "relay_timeout_seconds") { + t.Errorf("error must mention field: %v", err) + } + if !strings.Contains(err.Error(), "-42") { + t.Errorf("error must mention input value: %v", err) + } + + _, err = validateUint32Timeout("p2p_timeout_seconds", int64(math.MaxUint32)+1) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "exceeds") { + t.Errorf("overflow error must say 'exceeds': %v", err) + } +} + +// Sanity: the helper returns plain Go errors (not status.Errorf +// wrappers); the caller wraps them. Document that contract here. +func TestValidateUint32Timeout_PlainError(t *testing.T) { + _, err := validateUint32Timeout("x", -1) + unwrapped := err + if errors.Unwrap(err) != nil { + // fmt.Errorf without %w gives a plain error; if someone changes + // it to %w later this assertion catches the API change. + unwrapped = errors.Unwrap(err) + } + if unwrapped == nil { + t.Fatal("error must be non-nil") + } +} diff --git a/management/server/http/handlers/peer_connections/handler.go b/management/server/http/handlers/peer_connections/handler.go new file mode 100644 index 00000000000..de3a5c64210 --- /dev/null +++ b/management/server/http/handlers/peer_connections/handler.go @@ -0,0 +1,204 @@ +package peer_connections + +import ( + "context" + "encoding/json" + "net/http" + "strconv" + "sync/atomic" + "time" + + "github.com/gorilla/mux" + + nbcontext "github.com/netbirdio/netbird/management/server/context" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/peer_connections" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// AccountManager is the slice of the existing AccountManager interface +// this handler needs. Phase 3.7i of #5989. 
+type AccountManager interface { + GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) + GetDNSDomain(ctx context.Context, accountID string) string + GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) +} + +// SnapshotRequester triggers a SnapshotRequest on the peer's active +// Sync server-stream. Phase 3.7i of #5989. +type SnapshotRequester interface { + Request(peerPubKey string, nonce uint64) bool +} + +type Handler struct { + store peer_connections.Store + account AccountManager + router SnapshotRequester + nonce atomic.Uint64 +} + +func NewHandler(store peer_connections.Store, account AccountManager, router SnapshotRequester) *Handler { + return &Handler{store: store, account: account, router: router} +} + +type apiEntry struct { + RemotePubkey string `json:"remote_pubkey"` + RemoteFQDN string `json:"remote_fqdn,omitempty"` + ConnType string `json:"conn_type"` + LastHandshake string `json:"last_handshake,omitempty"` + LatencyMs uint32 `json:"latency_ms,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RelayServer string `json:"relay_server,omitempty"` + RxBytes uint64 `json:"rx_bytes,omitempty"` + TxBytes uint64 `json:"tx_bytes,omitempty"` +} + +type apiResponse struct { + PeerPubkey string `json:"peer_pubkey"` + Seq uint64 `json:"seq"` + FullSnapshot bool `json:"full_snapshot"` + InResponseTo uint64 `json:"in_response_to_nonce,omitempty"` + Entries []apiEntry `json:"entries"` +} + +type refreshResponse struct { + RefreshToken uint64 `json:"refresh_token"` + CachedMap *apiResponse `json:"cached_map,omitempty"` + // Dispatched is true when the snapshot request was actually delivered + // to an active Sync stream for this peer. False means the peer has + // no live stream (offline / between connections / older daemon + // without snapshot-request support) and the caller can decide whether + // to retry or fall back to the cached map. 
+ Dispatched bool `json:"dispatched"` +} + +// GetPeerConnections handles GET /api/peers/{peerId}/connections. +// 401 missing/invalid auth, 404 peer not found, 200 with body. +// ?since=N blocks up to 5 s for fresh data. +func (h *Handler) GetPeerConnections(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + peerID := mux.Vars(r)["peerId"] + peer, err := h.account.GetPeer(r.Context(), userAuth.AccountId, peerID, userAuth.UserId) + if err != nil { + http.Error(w, "peer not found", http.StatusNotFound) + return + } + + pubkey := peer.Key + since, _ := strconv.ParseUint(r.URL.Query().Get("since"), 10, 64) + + var ( + m *mgmProto.PeerConnectionMap + ok bool + ) + if since > 0 { + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + for { + m, ok = h.store.GetWithNonceCheck(pubkey, since) + if ok { + break + } + select { + case <-ctx.Done(): + m, ok = h.store.Get(pubkey) + goto done + case <-ticker.C: + } + } + } else { + m, ok = h.store.Get(pubkey) + } +done: + if !ok { + http.Error(w, "no connection data yet for this peer", http.StatusNotFound) + return + } + + dnsDomain := h.account.GetDNSDomain(r.Context(), userAuth.AccountId) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(h.buildResponse(r.Context(), userAuth.AccountId, dnsDomain, pubkey, m)) +} + +// PostRefresh handles POST /api/peers/{peerId}/connections/refresh. 
+func (h *Handler) PostRefresh(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + peerID := mux.Vars(r)["peerId"] + peer, err := h.account.GetPeer(r.Context(), userAuth.AccountId, peerID, userAuth.UserId) + if err != nil { + http.Error(w, "peer not found", http.StatusNotFound) + return + } + + pubkey := peer.Key + nonce := h.nonce.Add(1) + dispatched := false + if h.router != nil { + dispatched = h.router.Request(pubkey, nonce) + } + + dnsDomain := h.account.GetDNSDomain(r.Context(), userAuth.AccountId) + resp := refreshResponse{RefreshToken: nonce, Dispatched: dispatched} + if cached, ok := h.store.Get(pubkey); ok { + ar := h.buildResponse(r.Context(), userAuth.AccountId, dnsDomain, pubkey, cached) + resp.CachedMap = &ar + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + _ = json.NewEncoder(w).Encode(resp) +} + +func (h *Handler) buildResponse(ctx context.Context, accountID, dnsDomain, pubkey string, m *mgmProto.PeerConnectionMap) apiResponse { + resp := apiResponse{ + PeerPubkey: pubkey, + Seq: m.GetSeq(), + FullSnapshot: m.GetFullSnapshot(), + InResponseTo: m.GetInResponseToNonce(), + Entries: make([]apiEntry, 0, len(m.GetEntries())), + } + for _, e := range m.GetEntries() { + entry := apiEntry{ + RemotePubkey: e.GetRemotePubkey(), + ConnType: connTypeToStr(e.GetConnType()), + LatencyMs: e.GetLatencyMs(), + Endpoint: e.GetEndpoint(), + RelayServer: e.GetRelayServer(), + RxBytes: e.GetRxBytes(), + TxBytes: e.GetTxBytes(), + } + if hs := e.GetLastHandshake(); hs != nil && hs.IsValid() { + entry.LastHandshake = hs.AsTime().Format(time.RFC3339) + } + // Enrich remote_fqdn via account-peer lookup (best-effort). 
+ if rPeer, err := h.account.GetPeerByPubKey(ctx, accountID, e.GetRemotePubkey()); err == nil && rPeer != nil { + entry.RemoteFQDN = rPeer.FQDN(dnsDomain) + } + resp.Entries = append(resp.Entries, entry) + } + return resp +} + +func connTypeToStr(ct mgmProto.ConnType) string { + switch ct { + case mgmProto.ConnType_CONN_TYPE_P2P: + return "p2p" + case mgmProto.ConnType_CONN_TYPE_RELAYED: + return "relayed" + case mgmProto.ConnType_CONN_TYPE_CONNECTING: + return "connecting" + case mgmProto.ConnType_CONN_TYPE_IDLE: + return "idle" + default: + return "unspecified" + } +} diff --git a/management/server/http/handlers/peer_connections/handler_test.go b/management/server/http/handlers/peer_connections/handler_test.go new file mode 100644 index 00000000000..76ca4af6506 --- /dev/null +++ b/management/server/http/handlers/peer_connections/handler_test.go @@ -0,0 +1,137 @@ +package peer_connections + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" + + nbcontext "github.com/netbirdio/netbird/management/server/context" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/peer_connections" + "github.com/netbirdio/netbird/shared/auth" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +type fakeAM struct { + peers map[string]*nbpeer.Peer // peerID → Peer + peersByKey map[string]*nbpeer.Peer // pubkey → Peer + allowedAcc string + dnsDomain string +} + +func (a *fakeAM) GetPeer(_ context.Context, accountID, peerID, _ string) (*nbpeer.Peer, error) { + if a.allowedAcc != "" && a.allowedAcc != accountID { + return nil, errors.New("not found") + } + p, ok := a.peers[peerID] + if !ok { + return nil, errors.New("not found") + } + return p, nil +} + +func (a *fakeAM) GetPeerByPubKey(_ context.Context, _, pubKey string) (*nbpeer.Peer, error) { + p, ok := a.peersByKey[pubKey] + if !ok { + return nil, 
errors.New("not found") + } + return p, nil +} + +func (a *fakeAM) GetDNSDomain(_ context.Context, _ string) string { return a.dnsDomain } + +type fakeRouter struct{ calls int } + +func (f *fakeRouter) Request(_ string, _ uint64) bool { f.calls++; return true } + +func authedReq(method, target, accountID, userID string) *http.Request { + r := httptest.NewRequest(method, target, nil) + return nbcontext.SetUserAuthInRequest(r, auth.UserAuth{AccountId: accountID, UserId: userID}) +} + +func TestHandler_GetPeerConnections_Returns200WithCachedData(t *testing.T) { + store := peer_connections.NewMemoryStore(time.Hour) + store.Put("PUBKEY-A", &mgmProto.PeerConnectionMap{ + Seq: 1, + FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "PUBKEY-B", ConnType: mgmProto.ConnType_CONN_TYPE_P2P, LatencyMs: 12}}, + }) + am := &fakeAM{ + peers: map[string]*nbpeer.Peer{"peerA-id": {ID: "peerA-id", Key: "PUBKEY-A", AccountID: "acc1"}}, + peersByKey: map[string]*nbpeer.Peer{"PUBKEY-B": {ID: "peerB-id", Key: "PUBKEY-B", AccountID: "acc1"}}, + dnsDomain: "test.example", + } + h := NewHandler(store, am, nil) + + r := authedReq("GET", "/api/peers/peerA-id/connections", "acc1", "user1") + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.GetPeerConnections(w, r) + if w.Code != http.StatusOK { + t.Fatalf("want 200, got %d (body %s)", w.Code, w.Body.String()) + } + if !strings.Contains(w.Body.String(), "PUBKEY-B") { + t.Errorf("want PUBKEY-B in body, got %s", w.Body.String()) + } +} + +func TestHandler_GetPeerConnections_401WithoutAuth(t *testing.T) { + h := NewHandler(peer_connections.NewMemoryStore(time.Hour), &fakeAM{}, nil) + r := httptest.NewRequest("GET", "/api/peers/peerA-id/connections", nil) + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.GetPeerConnections(w, r) + if w.Code != http.StatusUnauthorized { + t.Fatalf("want 401, got %d", w.Code) + } +} + +func 
TestHandler_GetPeerConnections_404WhenPeerNotInAccount(t *testing.T) { + store := peer_connections.NewMemoryStore(time.Hour) + am := &fakeAM{ + peers: map[string]*nbpeer.Peer{"peerA-id": {ID: "peerA-id", Key: "PUBKEY-A", AccountID: "acc1"}}, + allowedAcc: "acc1", + } + h := NewHandler(store, am, nil) + // Authed as different account. + r := authedReq("GET", "/api/peers/peerA-id/connections", "acc2", "user1") + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.GetPeerConnections(w, r) + if w.Code != http.StatusNotFound { + t.Fatalf("want 404, got %d", w.Code) + } +} + +func TestHandler_PostRefresh_Returns202WithToken(t *testing.T) { + store := peer_connections.NewMemoryStore(time.Hour) + am := &fakeAM{ + peers: map[string]*nbpeer.Peer{"peerA-id": {ID: "peerA-id", Key: "PUBKEY-A", AccountID: "acc1"}}, + } + router := &fakeRouter{} + h := NewHandler(store, am, router) + r := authedReq("POST", "/api/peers/peerA-id/connections/refresh", "acc1", "user1") + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.PostRefresh(w, r) + if w.Code != http.StatusAccepted { + t.Fatalf("want 202, got %d", w.Code) + } + var body refreshResponse + if err := json.NewDecoder(w.Body).Decode(&body); err != nil { + t.Fatal(err) + } + if body.RefreshToken == 0 { + t.Error("want non-zero refresh_token") + } + if router.calls != 1 { + t.Errorf("want 1 SnapshotRequester call, got %d", router.calls) + } +} diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 1a8b83c7eed..d3ba49a1c7e 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -135,7 +135,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") 
zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } @@ -264,7 +264,7 @@ func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile strin customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } diff --git a/management/server/management_proto_test.go 
b/management/server/management_proto_test.go index 1b77ea3358f..a8df086c808 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -391,7 +391,7 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config return nil, nil, "", cleanup, err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil, nil, nil, nil) if err != nil { return nil, nil, "", cleanup, err } diff --git a/management/server/management_test.go b/management/server/management_test.go index f1d49193cdd..3eea1b5a594 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -257,6 +257,8 @@ func startServer( networkMapController, nil, nil, + nil, + nil, ) if err != nil { t.Fatalf("failed creating management server: %v", err) diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index ac4d0c6d671..44231a270d9 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -94,6 +94,7 @@ type MockAccountManager struct { GetDNSSettingsFunc func(ctx context.Context, accountID, userID string) (*types.DNSSettings, error) SaveDNSSettingsFunc func(ctx context.Context, accountID, userID string, dnsSettingsToSave *types.DNSSettings) error GetPeerFunc func(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) + GetPeerByPubKeyFunc func(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) UpdateAccountSettingsFunc func(ctx context.Context, accountID, userID string, newSettings *types.Settings) (*types.Settings, error) LoginPeerFunc func(ctx context.Context, login 
types.PeerLogin) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) SyncPeerFunc func(ctx context.Context, sync types.PeerSync, accountID string) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) @@ -820,6 +821,15 @@ func (am *MockAccountManager) GetPeer(ctx context.Context, accountID, peerID, us return nil, status.Errorf(codes.Unimplemented, "method GetPeer is not implemented") } +// GetPeerByPubKey mocks GetPeerByPubKey of the AccountManager interface. +// Phase 3.7i of #5989. +func (am *MockAccountManager) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) { + if am.GetPeerByPubKeyFunc != nil { + return am.GetPeerByPubKeyFunc(ctx, accountID, pubKey) + } + return nil, status.Errorf(codes.Unimplemented, "method GetPeerByPubKey is not implemented") +} + // UpdateAccountSettings mocks UpdateAccountSettings of the AccountManager interface func (am *MockAccountManager) UpdateAccountSettings(ctx context.Context, accountID, userID string, newSettings *types.Settings) (*types.Settings, error) { if am.UpdateAccountSettingsFunc != nil { diff --git a/management/server/peer.go b/management/server/peer.go index 25c6ecd8c57..301f918a143 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -34,7 +34,29 @@ import ( const remoteJobsMinVer = "0.64.0" // GetPeers returns peers visible to the user within an account. -// Users with "peers:read" see all peers. Otherwise, users see only their own peers, or none if restricted by account settings. +// +// Visibility precedence (matches the pre-PR-#6006 behaviour that +// upstream regressed in commit db44848e2 on 2026-04-28): +// +// 1. Users with peers:read permission (admin / owner / auditor / +// network_admin) see ALL account peers. +// 2. Restrictable users with RegularUsersViewBlocked=true see NOTHING. +// 3. 
Other users (the default "user" role) see THEIR OWN peers PLUS +// any peers reachable from their own peers via the account's +// access policies (Gegenstellen / counterparts -- typically the +// routing peers and other clients in the same access groups). +// +// The "policy-reachable peers" branch was dropped upstream in PR +// #6006 ("Drop netmap calculation on peer read") because the call to +// account.GetPeerConnectionResources() was expensive on large +// accounts. We re-add it under the same /api/peers GET path because +// without it the dashboard becomes useless for a regular user -- +// they can no longer see the routing peers their policies allow them +// to communicate with. The expense is one GetAccountWithBackpressure +// call + one GetPeerConnectionResources iteration per OWN peer, only +// for users who hit this branch (typically <10 own peers per user). +// +// Tracked in docs/bugs/2026-05-04-user-peer-visibility-regression.md. func (am *DefaultAccountManager) GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) { user, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, userID) if err != nil { @@ -59,7 +81,71 @@ func (am *DefaultAccountManager) GetPeers(ctx context.Context, accountID, userID return []*nbpeer.Peer{}, nil } - return am.Store.GetUserPeers(ctx, store.LockingStrengthNone, accountID, userID) + ownPeers, err := am.Store.GetUserPeers(ctx, store.LockingStrengthNone, accountID, userID) + if err != nil { + return nil, err + } + + visible, err := am.getUserAccessiblePeers(ctx, accountID, ownPeers) + if err != nil { + return nil, err + } + + return filterPeersByNameAndIP(visible, nameFilter, ipFilter), nil +} + +// filterPeersByNameAndIP applies the same substring matching the SQL store +// applies for the admin path (sql_store.go: WHERE name LIKE %f% AND ip LIKE %f%). +// Empty filters disable that dimension. 
+func filterPeersByNameAndIP(peers []*nbpeer.Peer, nameFilter, ipFilter string) []*nbpeer.Peer { + if nameFilter == "" && ipFilter == "" { + return peers + } + out := make([]*nbpeer.Peer, 0, len(peers)) + for _, p := range peers { + if nameFilter != "" && !strings.Contains(p.Name, nameFilter) { + continue + } + if ipFilter != "" && !strings.Contains(p.IP.String(), ipFilter) { + continue + } + out = append(out, p) + } + return out +} + +// getUserAccessiblePeers expands the user's own-peer list with every +// peer that any of those peers can reach through the account's +// access policies. The original implementation lived in this file +// before upstream PR #6006 deleted it; restored verbatim modulo the +// nameFilter/ipFilter handling which is enforced upstream by the +// caller filter loop. +func (am *DefaultAccountManager) getUserAccessiblePeers(ctx context.Context, accountID string, ownPeers []*nbpeer.Peer) ([]*nbpeer.Peer, error) { + peersMap := make(map[string]*nbpeer.Peer, len(ownPeers)) + for _, p := range ownPeers { + peersMap[p.ID] = p + } + + account, err := am.requestBuffer.GetAccountWithBackpressure(ctx, accountID) + if err != nil { + return nil, err + } + + approvedPeersMap, err := am.integratedPeerValidator.GetValidatedPeers(ctx, accountID, maps.Values(account.Groups), maps.Values(account.Peers), account.Settings.Extra) + if err != nil { + return nil, err + } + + groupIDToUserIDs := account.GetActiveGroupUsers() + + for _, peer := range ownPeers { + aclPeers, _, _, _ := account.GetPeerConnectionResources(ctx, peer, approvedPeersMap, groupIDToUserIDs) + for _, p := range aclPeers { + peersMap[p.ID] = p + } + } + + return maps.Values(peersMap), nil } // MarkPeerConnected marks peer as connected (true) or disconnected (false) @@ -1194,7 +1280,20 @@ func peerLoginExpired(ctx context.Context, peer *nbpeer.Peer, settings *types.Se } // GetPeer returns a peer visible to the user within an account. -// Users with "peers:read" permission can access any peer. 
Otherwise, users can access only their own peer. +// +// Visibility precedence mirrors GetPeers (the list endpoint): +// +// 1. Users with peers:read permission can access any peer. +// 2. Admins/service users + the peer owner can access it directly. +// 3. Other users can access the peer iff at least one of THEIR own +// peers is policy-connected to it (the peer is a Gegenstelle / +// counterpart -- typically a routing peer their access policy +// reaches). +// +// Branch 3 was dropped upstream in PR #6006 (commit db44848e2); we +// restore it here so the dashboard can still load the per-peer detail +// page when the user clicks on one of the policy-reachable peers +// returned by GetPeers above. func (am *DefaultAccountManager) GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) { peer, err := am.Store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) if err != nil { @@ -1214,14 +1313,62 @@ func (am *DefaultAccountManager) GetPeer(ctx context.Context, accountID, peerID, return nil, err } - // if admin or user owns this peer, return peer + // admin/service-user, or the peer owner -- direct access. if user.IsAdminOrServiceUser() || peer.UserID == userID { return peer, nil } + // Otherwise: check whether any of this user's own peers can reach + // the requested peer through the account access policies. + return am.checkIfUserOwnsPeer(ctx, accountID, userID, peer) +} + +// checkIfUserOwnsPeer permits a user to access `peer` if at least one +// of their own peers has a policy-allowed connection to it. Restored +// from the pre-PR-#6006 implementation; see GetPeer for context. 
+func (am *DefaultAccountManager) checkIfUserOwnsPeer(ctx context.Context, accountID, userID string, peer *nbpeer.Peer) (*nbpeer.Peer, error) { + account, err := am.requestBuffer.GetAccountWithBackpressure(ctx, accountID) + if err != nil { + return nil, err + } + + approvedPeersMap, err := am.integratedPeerValidator.GetValidatedPeers(ctx, accountID, maps.Values(account.Groups), maps.Values(account.Peers), account.Settings.Extra) + if err != nil { + return nil, err + } + + userPeers, err := am.Store.GetUserPeers(ctx, store.LockingStrengthNone, accountID, userID) + if err != nil { + return nil, err + } + + groupIDToUserIDs := account.GetActiveGroupUsers() + for _, p := range userPeers { + aclPeers, _, _, _ := account.GetPeerConnectionResources(ctx, p, approvedPeersMap, groupIDToUserIDs) + for _, aclPeer := range aclPeers { + if aclPeer.ID == peer.ID { + return peer, nil + } + } + } + return nil, status.Errorf(status.Internal, "user %s has no access to peer %s under account %s", userID, peer.ID, accountID) } +// GetPeerByPubKey returns the peer with the given WireGuard public key from +// the given account. Phase 3.7i of #5989 — used by REST handlers to enrich +// PeerConnectionMap entries with FQDNs. +func (am *DefaultAccountManager) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) { + p, err := am.Store.GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, pubKey) + if err != nil { + return nil, err + } + if p.AccountID != accountID { + return nil, fmt.Errorf("peer with pubkey %s not in account %s", pubKey, accountID) + } + return p, nil +} + // UpdateAccountPeers updates all peers that belong to an account. // Should be called when changes have to be synced to peers. 
func (am *DefaultAccountManager) UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { diff --git a/management/server/peer/peer.go b/management/server/peer/peer.go index db392ddda49..f93d0a13011 100644 --- a/management/server/peer/peer.go +++ b/management/server/peer/peer.go @@ -138,6 +138,20 @@ type PeerSystemMeta struct { //nolint:revive Environment Environment `gorm:"serializer:json"` Flags Flags `gorm:"serializer:json"` Files []File `gorm:"serializer:json"` + + // Phase 3.7i (#5989): peer-self-reported runtime mode/timeouts. Stored + // alongside Hostname/Kernel/etc as meta. Empty when peer pre-dates 3.7i. + EffectiveConnectionMode string `json:"effective_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `json:"effective_p2p_retry_max_secs,omitempty"` + + // Phase 3.7i (#5989): capability keywords this client build advertises. + // Empty for peers that pre-date the field. Used by mgmt to decide + // whether to send legacy-compat fallback settings (e.g. downgrade to + // p2p-lazy when client lacks "p2p_dynamic"). See + // client/system/features.go for the canonical list. 
+ SupportedFeatures []string `json:"supported_features,omitempty" gorm:"serializer:json"` } func (p PeerSystemMeta) isEqual(other PeerSystemMeta) bool { @@ -182,6 +196,11 @@ func (p PeerSystemMeta) isEqual(other PeerSystemMeta) bool { p.SystemManufacturer == other.SystemManufacturer && p.Environment.Cloud == other.Environment.Cloud && p.Environment.Platform == other.Environment.Platform && + p.EffectiveConnectionMode == other.EffectiveConnectionMode && + p.EffectiveRelayTimeoutSecs == other.EffectiveRelayTimeoutSecs && + p.EffectiveP2PTimeoutSecs == other.EffectiveP2PTimeoutSecs && + p.EffectiveP2PRetryMaxSecs == other.EffectiveP2PRetryMaxSecs && + slices.Equal(p.SupportedFeatures, other.SupportedFeatures) && p.Flags.isEqual(other.Flags) } diff --git a/management/server/peer/peer_test.go b/management/server/peer/peer_test.go index 1aa3f6ffcea..187f0648c58 100644 --- a/management/server/peer/peer_test.go +++ b/management/server/peer/peer_test.go @@ -141,3 +141,12 @@ func TestFlags_IsEqual(t *testing.T) { }) } } + +func TestPeerSystemMeta_isEqual_ChecksEffectiveFields(t *testing.T) { + base := PeerSystemMeta{Hostname: "h", EffectiveConnectionMode: "p2p-dynamic"} + other := base + other.EffectiveConnectionMode = "p2p" + if base.isEqual(other) { + t.Error("isEqual should return false when EffectiveConnectionMode differs") + } +} diff --git a/management/server/peer_connections/snapshot_router.go b/management/server/peer_connections/snapshot_router.go new file mode 100644 index 00000000000..2daa2b86d8c --- /dev/null +++ b/management/server/peer_connections/snapshot_router.go @@ -0,0 +1,73 @@ +package peer_connections + +import "sync" + +// SnapshotRouter holds per-peer-pubkey send-channels so REST handlers +// can inject a SnapshotRequest into the active Sync server-stream. +// Stream owners (mgmt grpc handleUpdates) Register on stream-start and +// Unregister on stream-close. Phase 3.7i of #5989. 
+type SnapshotRouter struct { + mu sync.Mutex + channels map[string]chan uint64 +} + +func NewSnapshotRouter() *SnapshotRouter { + return &SnapshotRouter{channels: make(map[string]chan uint64)} +} + +// Register returns a buffered channel the stream owner reads from to +// receive snapshot-request nonces. The returned channel is the token +// the caller must pass to Unregister so a stale stream cannot tear +// down a fresh stream's channel after a quick reconnect. +func (r *SnapshotRouter) Register(peerPubKey string) <-chan uint64 { + r.mu.Lock() + defer r.mu.Unlock() + ch := make(chan uint64, 4) + if old, ok := r.channels[peerPubKey]; ok { + // A second concurrent stream for the same peer (e.g. fast + // reconnect) — close the previous channel so its goroutine + // exits cleanly, then install the new one. + close(old) + } + r.channels[peerPubKey] = ch + return ch +} + +// Unregister closes the given channel (token returned from Register) +// and removes the peer from the router only if that channel is still +// the live one. A stale stream calling Unregister after a fresh stream +// has registered must not tear down the new stream's channel. +// Idempotent. +func (r *SnapshotRouter) Unregister(peerPubKey string, token <-chan uint64) { + r.mu.Lock() + defer r.mu.Unlock() + current, ok := r.channels[peerPubKey] + if !ok { + return + } + if (<-chan uint64)(current) != token { + // A newer Register replaced our channel; that newer Register + // already closed our old channel, so nothing to do here. + return + } + close(current) + delete(r.channels, peerPubKey) +} + +// Request enqueues a nonce for the given peer's snapshot channel. +// Returns true if delivered, false if no active stream for that peer +// or the channel is full (channel capacity 4). 
+func (r *SnapshotRouter) Request(peerPubKey string, nonce uint64) bool { + r.mu.Lock() + defer r.mu.Unlock() + ch, ok := r.channels[peerPubKey] + if !ok { + return false + } + select { + case ch <- nonce: + return true + default: + return false + } +} diff --git a/management/server/peer_connections/snapshot_router_test.go b/management/server/peer_connections/snapshot_router_test.go new file mode 100644 index 00000000000..617e07dbb0c --- /dev/null +++ b/management/server/peer_connections/snapshot_router_test.go @@ -0,0 +1,62 @@ +package peer_connections + +import "testing" + +func TestSnapshotRouter_RegisterAndRequest(t *testing.T) { + r := NewSnapshotRouter() + ch := r.Register("peerA-pubkey") + if !r.Request("peerA-pubkey", 42) { + t.Fatal("Request should return true for registered peer") + } + select { + case n := <-ch: + if n != 42 { + t.Errorf("want nonce 42, got %d", n) + } + default: + t.Fatal("nonce was not delivered to channel") + } +} + +func TestSnapshotRouter_RequestUnregisteredPeer(t *testing.T) { + r := NewSnapshotRouter() + if r.Request("ghost", 1) { + t.Error("Request for unregistered peer should return false") + } +} + +func TestSnapshotRouter_UnregisterClosesChannel(t *testing.T) { + r := NewSnapshotRouter() + ch := r.Register("peerA") + r.Unregister("peerA", ch) + if _, ok := <-ch; ok { + t.Error("channel should be closed after Unregister") + } +} + +func TestSnapshotRouter_StaleUnregisterDoesNotEvictNewStream(t *testing.T) { + r := NewSnapshotRouter() + old := r.Register("peerA") + // Second Register simulates a fast reconnect: it must close the + // previous channel and replace it. + fresh := r.Register("peerA") + if _, ok := <-old; ok { + t.Error("old channel should be closed when a second Register comes in") + } + // Stale stream calling Unregister with the (now-closed) old token + // must not touch the fresh channel. 
+ r.Unregister("peerA", old) + select { + case _, ok := <-fresh: + if !ok { + t.Error("fresh channel must not be closed by stale Unregister") + } + default: + // expected: channel still open and empty + } + // Proper Unregister with the fresh token tears it down. + r.Unregister("peerA", fresh) + if _, ok := <-fresh; ok { + t.Error("fresh channel should be closed after its own Unregister") + } +} diff --git a/management/server/peer_connections/store.go b/management/server/peer_connections/store.go new file mode 100644 index 00000000000..0914638b71d --- /dev/null +++ b/management/server/peer_connections/store.go @@ -0,0 +1,156 @@ +package peer_connections + +import ( + "sync" + "time" + + "google.golang.org/protobuf/proto" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Clock is the time source MemoryStore consults. Production passes +// realClock{}; tests inject fakeClock to control TTL deterministically. +// Phase 3.7i of #5989. +type Clock interface { + Now() time.Time +} + +type realClock struct{} + +func (realClock) Now() time.Time { return time.Now() } + +// Store is the interface peer-connections-map storage implementations +// must satisfy. Phase 3.7i ships only MemoryStore. RedisStore is a +// future possibility behind the same interface (deferred). +type Store interface { + Put(peerPubKey string, m *mgmProto.PeerConnectionMap) + Get(peerPubKey string) (*mgmProto.PeerConnectionMap, bool) + GetWithNonceCheck(peerPubKey string, sinceNonce uint64) (*mgmProto.PeerConnectionMap, bool) +} + +// MemoryStore is the in-memory Store implementation. Phase 3.7i. +type MemoryStore struct { + ttl time.Duration + clock Clock + mu sync.Mutex + maps map[string]*memEntry +} + +type memEntry struct { + m *mgmProto.PeerConnectionMap + updatedAt time.Time +} + +// NewMemoryStore returns a MemoryStore using wall-clock time. 
+func NewMemoryStore(ttl time.Duration) *MemoryStore { + return newMemoryStoreWithClock(ttl, realClock{}) +} + +// newMemoryStoreWithClock is the test-only ctor that lets tests inject a +// fakeClock for deterministic TTL behaviour. +func newMemoryStoreWithClock(ttl time.Duration, clk Clock) *MemoryStore { + return &MemoryStore{ + ttl: ttl, + clock: clk, + maps: make(map[string]*memEntry), + } +} + +// Put stores or merges a connection-map for peerPubKey. +// - full_snapshot=true -> ALWAYS replace, regardless of seq. The +// pusher resets seq to 1 on every daemon-restart, so a fresh full +// snapshot may carry seq=1 against a cached prev.seq=50. +// - delta + no prior entry -> store as-is. +// - delta + prior entry, mismatched session_id -> drop. Codex +// follow-up: SyncPeerConnections is a unary RPC, so a stale +// in-flight retry from the previous daemon process can arrive +// AFTER the new process's full snapshot. Without the session_id +// check, the stale delta's seq (e.g. 51) would beat the new +// process's seq (e.g. 2) and merge old data into the fresh map. +// session_id=0 means a legacy client and falls back to seq-only. +// - delta + prior entry, matching session, out-of-order seq -> drop. +// - delta + prior entry, matching session, in-order seq -> merge. +func (s *MemoryStore) Put(peerPubKey string, m *mgmProto.PeerConnectionMap) { + s.mu.Lock() + defer s.mu.Unlock() + + prev := s.maps[peerPubKey] + if !m.GetFullSnapshot() && prev != nil { + // Drop deltas from a different daemon session than the cached + // state. Both sides must advertise a session_id for the check + // to apply; if either is 0 we fall back to the seq-only path. 
+ if m.GetSessionId() != 0 && prev.m.GetSessionId() != 0 && + m.GetSessionId() != prev.m.GetSessionId() { + return + } + if m.GetSeq() > 0 && m.GetSeq() <= prev.m.GetSeq() { + return + } + } + + stored := proto.Clone(m).(*mgmProto.PeerConnectionMap) + if !m.GetFullSnapshot() && prev != nil { + merged := proto.Clone(prev.m).(*mgmProto.PeerConnectionMap) + merged.Seq = m.GetSeq() + merged.FullSnapshot = false + // Keep the latest non-zero refresh-nonce. A snapshot pushed in + // response to nonce N must remain reachable via GET ?since=N + // even when a regular delta with InResponseToNonce=0 arrives + // shortly after; otherwise the refresh polling client gives up + // and falls back to the next sync interval (~60 s gap). + if m.GetInResponseToNonce() > merged.GetInResponseToNonce() { + merged.InResponseToNonce = m.GetInResponseToNonce() + } + byKey := make(map[string]int, len(merged.Entries)) + for i, e := range merged.Entries { + byKey[e.GetRemotePubkey()] = i + } + for _, ne := range stored.Entries { + if idx, ok := byKey[ne.GetRemotePubkey()]; ok { + merged.Entries[idx] = ne + } else { + merged.Entries = append(merged.Entries, ne) + byKey[ne.GetRemotePubkey()] = len(merged.Entries) - 1 + } + } + stored = merged + } + s.maps[peerPubKey] = &memEntry{m: stored, updatedAt: s.clock.Now()} +} + +// Get returns a deep copy of the cached map for peerPubKey, or false if +// missing or TTL-expired. +func (s *MemoryStore) Get(peerPubKey string) (*mgmProto.PeerConnectionMap, bool) { + s.mu.Lock() + defer s.mu.Unlock() + e, ok := s.maps[peerPubKey] + if !ok { + return nil, false + } + if s.clock.Now().Sub(e.updatedAt) > s.ttl { + delete(s.maps, peerPubKey) + return nil, false + } + return proto.Clone(e.m).(*mgmProto.PeerConnectionMap), true +} + +// GetWithNonceCheck returns the cached map only if its +// InResponseToNonce >= sinceNonce (refresh-flow polling). Same TTL + +// deep-copy semantics as Get. 
+func (s *MemoryStore) GetWithNonceCheck(peerPubKey string, since uint64) (*mgmProto.PeerConnectionMap, bool) { + s.mu.Lock() + defer s.mu.Unlock() + e, ok := s.maps[peerPubKey] + if !ok { + return nil, false + } + if since > 0 && e.m.GetInResponseToNonce() < since { + return nil, false + } + if s.clock.Now().Sub(e.updatedAt) > s.ttl { + delete(s.maps, peerPubKey) + return nil, false + } + return proto.Clone(e.m).(*mgmProto.PeerConnectionMap), true +} diff --git a/management/server/peer_connections/store_test.go b/management/server/peer_connections/store_test.go new file mode 100644 index 00000000000..a273d8ea055 --- /dev/null +++ b/management/server/peer_connections/store_test.go @@ -0,0 +1,261 @@ +package peer_connections + +import ( + "testing" + "time" + + "google.golang.org/protobuf/types/known/timestamppb" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +type fakeClock struct{ now time.Time } + +func (c *fakeClock) Now() time.Time { return c.now } +func (c *fakeClock) advance(d time.Duration) { c.now = c.now.Add(d) } + +func newStoreWithClock(ttl time.Duration) (*MemoryStore, *fakeClock) { + clk := &fakeClock{now: time.Now()} + s := newMemoryStoreWithClock(ttl, clk) + return s, clk +} + +func TestMemoryStore_PutFullThenGet(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 1, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 10}}, + }) + got, ok := s.Get("peerA") + if !ok { + t.Fatal("expected entry") + } + if len(got.GetEntries()) != 1 || got.GetEntries()[0].GetRemotePubkey() != "peerB" { + t.Errorf("unexpected entries: %+v", got.GetEntries()) + } +} + +func TestMemoryStore_DeepCopyOnReturn(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 1, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "peerB", LastHandshake: 
timestamppb.New(time.Now())}, + }, + }) + got1, _ := s.Get("peerA") + got1.GetEntries()[0].RemotePubkey = "MUTATED" + got2, _ := s.Get("peerA") + if got2.GetEntries()[0].GetRemotePubkey() != "peerB" { + t.Errorf("Get returned shared pointer; mutation leaked: %s", got2.GetEntries()[0].GetRemotePubkey()) + } +} + +func TestMemoryStore_DeltaMerges(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 1, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "peerB", LatencyMs: 10}, + {RemotePubkey: "peerC", LatencyMs: 30}, + }}) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 2, FullSnapshot: false, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 14}}}) + got, _ := s.Get("peerA") + if len(got.GetEntries()) != 2 { + t.Fatalf("want 2 entries, got %d", len(got.GetEntries())) + } + for _, e := range got.GetEntries() { + if e.GetRemotePubkey() == "peerB" && e.GetLatencyMs() != 14 { + t.Errorf("peerB latency not updated: %d", e.GetLatencyMs()) + } + } +} + +func TestMemoryStore_OutOfOrderDeltaDropped(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 5, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 99}}}) + // Stale delta with lower seq must be dropped (in-order seq guarantee + // applies to deltas within a single stream). 
+ s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 3, FullSnapshot: false, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 11}}}) + got, _ := s.Get("peerA") + if got.GetSeq() != 5 { + t.Errorf("want seq 5, got %d", got.GetSeq()) + } + if got.GetEntries()[0].GetLatencyMs() != 99 { + t.Errorf("want latency 99 (stale delta dropped), got %d", got.GetEntries()[0].GetLatencyMs()) + } +} + +// TestMemoryStore_FullSnapshotResetsEpoch covers Codex finding 2: the +// pusher resets seq to 1 on every daemon-/stream-restart, so a fresh +// full snapshot may carry seq=1 against a cached prev.seq=50 from the +// previous session. Without the full-snapshot epoch escape, the +// dashboard would stay stale until TTL expiry. +func TestMemoryStore_FullSnapshotResetsEpoch(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + // Old session: pusher reached seq=50 with one peer. + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 50, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "oldPeer", LatencyMs: 100}}}) + // Daemon restart: new session starts fresh at seq=1 with a different + // peer set. Must replace, NOT be dropped. + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 1, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "newPeer", LatencyMs: 7}}}) + got, ok := s.Get("peerA") + if !ok { + t.Fatal("expected entry after restart full-snapshot") + } + if got.GetSeq() != 1 { + t.Errorf("want seq 1 (post-restart epoch), got %d", got.GetSeq()) + } + if len(got.GetEntries()) != 1 || got.GetEntries()[0].GetRemotePubkey() != "newPeer" { + t.Errorf("want only newPeer in entries, got %+v", got.GetEntries()) + } + // Subsequent in-order delta (seq=2) from new session must merge. 
+ s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 2, FullSnapshot: false, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "newPeer", LatencyMs: 9}}}) + got, _ = s.Get("peerA") + if got.GetEntries()[0].GetLatencyMs() != 9 { + t.Errorf("want latency 9 (delta from new session), got %d", got.GetEntries()[0].GetLatencyMs()) + } +} + +// TestMemoryStore_StaleDeltaFromOldSessionDropped covers the Codex +// follow-up to TestMemoryStore_FullSnapshotResetsEpoch: even with the +// full-snapshot epoch escape, SyncPeerConnections is a UNARY RPC so a +// retried stale delta from the previous daemon process can race past +// the new process's full snapshot. Without the session_id check the +// stale delta's seq beats the new process's seq and merges old data +// into the fresh map. With session_id, mgmt drops it. +func TestMemoryStore_StaleDeltaFromOldSessionDropped(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + const sessionA uint64 = 0xAAAA1111 + const sessionB uint64 = 0xBBBB2222 + + // Old session A reached seq=50, mgmt cached seq=50. + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 50, FullSnapshot: true, SessionId: sessionA, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "oldPeer", LatencyMs: 100}, + }, + }) + // Daemon restart: process B sends fresh full snapshot at seq=1. + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 1, FullSnapshot: true, SessionId: sessionB, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "newPeer", LatencyMs: 7}, + }, + }) + got, _ := s.Get("peerA") + if got.GetSessionId() != sessionB { + t.Fatalf("want session B after restart, got %x", got.GetSessionId()) + } + + // Stale in-flight delta from process A retries and arrives now. + // seq=51 > current seq=1, but session mismatch must drop it. 
+ s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 51, FullSnapshot: false, SessionId: sessionA, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "oldPeer", LatencyMs: 999}, + {RemotePubkey: "newPeer", LatencyMs: 999}, + }, + }) + got, _ = s.Get("peerA") + if got.GetSeq() != 1 { + t.Errorf("stale delta from session A leaked: seq advanced to %d", got.GetSeq()) + } + if len(got.GetEntries()) != 1 || got.GetEntries()[0].GetRemotePubkey() != "newPeer" { + t.Errorf("stale delta from session A merged into newPeer map: %+v", got.GetEntries()) + } + if got.GetEntries()[0].GetLatencyMs() != 7 { + t.Errorf("stale delta from session A overwrote newPeer.latency: %d", got.GetEntries()[0].GetLatencyMs()) + } + + // In-order delta from session B (matching session) merges normally. + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 2, FullSnapshot: false, SessionId: sessionB, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "newPeer", LatencyMs: 9}, + }, + }) + got, _ = s.Get("peerA") + if got.GetEntries()[0].GetLatencyMs() != 9 { + t.Errorf("in-session delta dropped: latency %d != 9", got.GetEntries()[0].GetLatencyMs()) + } +} + +// Backwards-compat: legacy clients (Phase 3.7i shipped without +// session_id) send 0. Mgmt must keep accepting their deltas under the +// pre-Codex-follow-up seq-only rules so a partial fleet upgrade +// doesn't silently drop pushes. +func TestMemoryStore_LegacyZeroSessionFallsBackToSeqOnly(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + + // Legacy full snapshot, session_id=0. + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 5, FullSnapshot: true, SessionId: 0, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 10}}, + }) + // Legacy in-order delta, session_id=0 still. 
+ s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 6, FullSnapshot: false, SessionId: 0, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 14}}, + }) + got, _ := s.Get("peerA") + if got.GetEntries()[0].GetLatencyMs() != 14 { + t.Errorf("legacy delta dropped under session-id check: latency %d != 14", got.GetEntries()[0].GetLatencyMs()) + } + + // Legacy out-of-order delta still dropped (seq <= prev.seq). + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 4, FullSnapshot: false, SessionId: 0, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 99}}, + }) + got, _ = s.Get("peerA") + if got.GetEntries()[0].GetLatencyMs() != 14 { + t.Errorf("legacy seq-only protection broken: latency %d != 14", got.GetEntries()[0].GetLatencyMs()) + } +} + +// Mixed-version edge: a legacy delta (session_id=0) arriving against a +// session-tagged cached state must NOT be dropped (would be a fleet +// upgrade hazard). Falls back to seq-only. +func TestMemoryStore_MixedSessionAcceptsLegacyDelta(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + const sessionA uint64 = 0x12345 + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 5, FullSnapshot: true, SessionId: sessionA, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 10}}, + }) + // Legacy delta (session_id=0). Must be accepted under the seq check. 
+ s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 6, FullSnapshot: false, SessionId: 0, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 22}}, + }) + got, _ := s.Get("peerA") + if got.GetEntries()[0].GetLatencyMs() != 22 { + t.Errorf("mixed-session legacy delta dropped: latency %d != 22", got.GetEntries()[0].GetLatencyMs()) + } +} + +func TestMemoryStore_TTLExpires(t *testing.T) { + s, clk := newStoreWithClock(50 * time.Millisecond) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 1, FullSnapshot: true}) + clk.advance(60 * time.Millisecond) + if _, ok := s.Get("peerA"); ok { + t.Error("expected TTL-expired entry to be gone") + } +} + +func TestMemoryStore_NonceCheck(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 1, FullSnapshot: true, InResponseToNonce: 0}) + if _, ok := s.GetWithNonceCheck("peerA", 5); ok { + t.Error("expected GetWithNonceCheck to refuse stale data when sinceNonce > InResponseToNonce") + } + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 2, FullSnapshot: true, InResponseToNonce: 5}) + if _, ok := s.GetWithNonceCheck("peerA", 5); !ok { + t.Error("expected GetWithNonceCheck to return when InResponseToNonce >= sinceNonce") + } +} diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 36809d354f2..75b9a595099 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -559,10 +559,34 @@ func TestDefaultAccountManager_GetPeer(t *testing.T) { } assert.NotNil(t, peer) - // the user can NOT see peer2 because it is not owned by them. - // Regular users only see peers they directly own. + // Restored pre-PR-#6006 behaviour: the user can ALSO see peer2 because + // the default account policy (All -> All) lets peer1 reach peer2 via + // ACL. Regular users see their own peers PLUS peers reachable from + // their own peers via the account's access policies. 
See the + // docs/bugs/2026-05-04-user-peer-visibility-regression.md analysis + // for the rationale. + peer, err = manager.GetPeer(context.Background(), accountID, peer2.ID, someUser) + if err != nil { + t.Fatalf("regular user should see policy-reachable peer2: %v", err) + return + } + assert.NotNil(t, peer) + + // Now strip the default policies so peer1 has NO policy reaching peer2. + // In that case the regular user must NOT be able to read peer2 -- this + // is the "policy denies" half of the contract restored by the fix. + account, err = manager.Store.GetAccount(context.Background(), accountID) + if err != nil { + t.Fatal(err) + return + } + account.Policies = []*types.Policy{} + if err := manager.Store.SaveAccount(context.Background(), account); err != nil { + t.Fatal(err) + return + } _, err = manager.GetPeer(context.Background(), accountID, peer2.ID, someUser) - assert.Error(t, err) + assert.Error(t, err, "regular user must NOT see peer2 once no policy reaches it from peer1") // admin users can always access all the peers peer, err = manager.GetPeer(context.Background(), accountID, peer1.ID, adminUser) @@ -729,6 +753,37 @@ func TestDefaultAccountManager_GetPeers(t *testing.T) { } } +func TestFilterPeersByNameAndIP(t *testing.T) { + peers := []*nbpeer.Peer{ + {ID: "1", Name: "alpha", IP: net.ParseIP("10.0.0.1")}, + {ID: "2", Name: "beta", IP: net.ParseIP("10.0.0.2")}, + {ID: "3", Name: "alpha-2", IP: net.ParseIP("10.0.1.5")}, + {ID: "4", Name: "gamma", IP: net.ParseIP("192.168.1.1")}, + } + + t.Run("no filters", func(t *testing.T) { + got := filterPeersByNameAndIP(peers, "", "") + assert.Len(t, got, 4) + }) + t.Run("name substring", func(t *testing.T) { + got := filterPeersByNameAndIP(peers, "alpha", "") + assert.Len(t, got, 2) + }) + t.Run("ip substring", func(t *testing.T) { + got := filterPeersByNameAndIP(peers, "", "10.0.0.") + assert.Len(t, got, 2) + }) + t.Run("name AND ip both required", func(t *testing.T) { + got := filterPeersByNameAndIP(peers, 
"alpha", "10.0.1.") + assert.Len(t, got, 1) + assert.Equal(t, "3", got[0].ID) + }) + t.Run("no matches", func(t *testing.T) { + got := filterPeersByNameAndIP(peers, "zeta", "") + assert.Empty(t, got) + }) +} + func setupTestAccountManager(b testing.TB, peers int, groups int) (*DefaultAccountManager, *update_channel.PeersUpdateManager, string, string, error) { b.Helper() @@ -1156,7 +1211,7 @@ func TestToSyncResponse(t *testing.T) { } dnsCache := &cache.DNSConfigCache{} accountSettings := &types.Settings{RoutingPeerDNSResolutionEnabled: true} - response := grpc.ToSyncResponse(context.Background(), config, config.HttpConfig, config.DeviceAuthorizationFlow, peer, turnRelayToken, turnRelayToken, networkMap, dnsName, checks, dnsCache, accountSettings, nil, []string{}, int64(dnsForwarderPort)) + response := grpc.ToSyncResponse(context.Background(), config, config.HttpConfig, config.DeviceAuthorizationFlow, peer, turnRelayToken, turnRelayToken, networkMap, dnsName, checks, dnsCache, accountSettings, nil, []string{}, int64(dnsForwarderPort), nil) assert.NotNil(t, response) // assert peer config diff --git a/management/server/store/file_store.go b/management/server/store/file_store.go index 81185b020fe..82e00881012 100644 --- a/management/server/store/file_store.go +++ b/management/server/store/file_store.go @@ -104,6 +104,12 @@ func restore(ctx context.Context, file string) (*FileStore, error) { RoutingPeerDNSResolutionEnabled: true, } } + // Phase 3.7i (#5989): the FileStore is the legacy on-disk JSON + // backend. Existing JSON files predate the LegacyLazyFallback* + // fields entirely (they unmarshal as the Go zero value: + // false / 0). Apply the defaults on every load so a JSON + // account doesn't silently disable the fallback after upgrade. 
+ account.Settings.ApplyLegacyLazyFallbackDefaults() for setupKeyId := range account.SetupKeys { store.SetupKeyID2AccountID[strings.ToUpper(setupKeyId)] = accountID diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 1fa3d08ee5e..3460736faf3 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -1513,6 +1513,17 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc settings_jwt_groups_enabled, settings_jwt_groups_claim_name, settings_jwt_allow_groups, settings_routing_peer_dns_resolution_enabled, settings_dns_domain, settings_network_range, settings_lazy_connection_enabled, + -- Phase-3.7i (#5989) connection-mode columns. The pgx fast + -- path must SELECT these or new modes silently regress to the + -- legacy LazyConnectionEnabled bool, which clients then + -- interpret as ModeP2P (eager) -- defeating the picker. + settings_connection_mode, + settings_relay_timeout_seconds, + settings_p2p_timeout_seconds, + settings_p2p_retry_max_seconds, + -- Phase-3.7i (#5989) legacy-client compatibility settings. 
+ settings_legacy_lazy_fallback_enabled, + settings_legacy_lazy_fallback_timeout_seconds, -- Embedded ExtraSettings settings_extra_peer_approval_enabled, settings_extra_user_approval_required, settings_extra_integrated_validator, settings_extra_integrated_validator_groups @@ -1532,6 +1543,12 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc sDNSDomain sql.NullString sNetworkRange sql.NullString sLazyConnectionEnabled sql.NullBool + sConnectionMode sql.NullString + sRelayTimeoutSeconds sql.NullInt64 + sP2pTimeoutSeconds sql.NullInt64 + sP2pRetryMaxSeconds sql.NullInt64 + sLegacyLazyFallbackEnabled sql.NullBool + sLegacyLazyFallbackTimeoutSecs sql.NullInt64 sExtraPeerApprovalEnabled sql.NullBool sExtraUserApprovalRequired sql.NullBool sExtraIntegratedValidator sql.NullString @@ -1553,6 +1570,8 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc &sJWTGroupsEnabled, &sJWTGroupsClaimName, &sJWTAllowGroups, &sRoutingPeerDNSResolutionEnabled, &sDNSDomain, &sNetworkRange, &sLazyConnectionEnabled, + &sConnectionMode, &sRelayTimeoutSeconds, &sP2pTimeoutSeconds, &sP2pRetryMaxSeconds, + &sLegacyLazyFallbackEnabled, &sLegacyLazyFallbackTimeoutSecs, &sExtraPeerApprovalEnabled, &sExtraUserApprovalRequired, &sExtraIntegratedValidator, &sExtraIntegratedValidatorGroups, ) @@ -1615,6 +1634,33 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc if sLazyConnectionEnabled.Valid { account.Settings.LazyConnectionEnabled = sLazyConnectionEnabled.Bool } + if sConnectionMode.Valid { + v := sConnectionMode.String + account.Settings.ConnectionMode = &v + } + if sRelayTimeoutSeconds.Valid { + v := uint32(sRelayTimeoutSeconds.Int64) + account.Settings.RelayTimeoutSeconds = &v + } + if sP2pTimeoutSeconds.Valid { + v := uint32(sP2pTimeoutSeconds.Int64) + account.Settings.P2pTimeoutSeconds = &v + } + if sP2pRetryMaxSeconds.Valid { + v := uint32(sP2pRetryMaxSeconds.Int64) + 
account.Settings.P2pRetryMaxSeconds = &v + } + if sLegacyLazyFallbackEnabled.Valid { + account.Settings.LegacyLazyFallbackEnabled = sLegacyLazyFallbackEnabled.Bool + } else { + // Pre-3.7i row in DB - default to enabled (matches GORM default). + account.Settings.LegacyLazyFallbackEnabled = true + } + if sLegacyLazyFallbackTimeoutSecs.Valid { + account.Settings.LegacyLazyFallbackTimeoutSeconds = uint32(sLegacyLazyFallbackTimeoutSecs.Int64) + } else { + account.Settings.LegacyLazyFallbackTimeoutSeconds = 3600 + } if sJWTAllowGroups.Valid { _ = json.Unmarshal([]byte(sJWTAllowGroups.String), &account.Settings.JWTAllowGroups) } @@ -1701,13 +1747,29 @@ func (s *SqlStore) getSetupKeys(ctx context.Context, accountID string) ([]types. } func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Peer, error) { + // Phase-3.7i (#5989): the pgx fast-path MUST select meta_supported_features + // and meta_effective_* alongside the other meta_* columns. Forgetting them + // here causes every loaded peer to come back with Meta.SupportedFeatures = + // nil, which makes toPeerConfig's legacy-fallback check + // slices.Contains(peer.Meta.SupportedFeatures, "p2p_dynamic") + // return false for EVERY 3.7i+ client. The server then silently downgrades + // p2p-dynamic -> p2p-lazy on the second NetworkMap push (~5s after the + // initial Login response, which uses the in-memory peer with fresh meta). + // Symptom: "lazy/dynamic mode change p2p-dynamic -> p2p-lazy" 5s after + // every Login, observed on uray-mic-d4 and ctb50-d. + // The same trap is documented for settings_connection_mode 30 lines above + // in getAccount(); applies analogously here. 
const query = `SELECT id, account_id, key, ip, name, dns_label, user_id, ssh_key, ssh_enabled, login_expiration_enabled, - inactivity_expiration_enabled, last_login, created_at, ephemeral, extra_dns_labels, allow_extra_dns_labels, meta_hostname, - meta_go_os, meta_kernel, meta_core, meta_platform, meta_os, meta_os_version, meta_wt_version, meta_ui_version, + inactivity_expiration_enabled, last_login, created_at, ephemeral, extra_dns_labels, allow_extra_dns_labels, meta_hostname, + meta_go_os, meta_kernel, meta_core, meta_platform, meta_os, meta_os_version, meta_wt_version, meta_ui_version, meta_kernel_version, meta_network_addresses, meta_system_serial_number, meta_system_product_name, meta_system_manufacturer, - meta_environment, meta_flags, meta_files, peer_status_last_seen, peer_status_connected, peer_status_login_expired, - peer_status_requires_approval, location_connection_ip, location_country_code, location_city_name, - location_geo_name_id, proxy_meta_embedded, proxy_meta_cluster FROM peers WHERE account_id = $1` + meta_environment, meta_flags, meta_files, peer_status_last_seen, peer_status_connected, peer_status_login_expired, + peer_status_requires_approval, location_connection_ip, location_country_code, location_city_name, + location_geo_name_id, proxy_meta_embedded, proxy_meta_cluster, + meta_effective_connection_mode, meta_effective_relay_timeout_secs, + meta_effective_p2_p_timeout_secs, meta_effective_p2_p_retry_max_secs, + meta_supported_features + FROM peers WHERE account_id = $1` rows, err := s.pool.Query(ctx, query, accountID) if err != nil { return nil, err @@ -1727,6 +1789,10 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee metaSystemSerialNumber, metaSystemProductName, metaSystemManufacturer sql.NullString locationCountryCode, locationCityName, proxyCluster sql.NullString locationGeoNameID sql.NullInt64 + // Phase-3.7i (#5989) connection-mode + capability columns. 
+ metaEffectiveConnectionMode sql.NullString + metaEffectiveRelayTimeoutSecs, metaEffectiveP2PTimeoutSecs, metaEffectiveP2PRetryMaxSecs sql.NullInt64 + metaSupportedFeatures []byte ) err := row.Scan(&p.ID, &p.AccountID, &p.Key, &ip, &p.Name, &p.DNSLabel, &p.UserID, &p.SSHKey, &sshEnabled, @@ -1735,7 +1801,10 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee &metaOS, &metaOSVersion, &metaWtVersion, &metaUIVersion, &metaKernelVersion, &netAddr, &metaSystemSerialNumber, &metaSystemProductName, &metaSystemManufacturer, &env, &flags, &files, &peerStatusLastSeen, &peerStatusConnected, &peerStatusLoginExpired, &peerStatusRequiresApproval, &connIP, - &locationCountryCode, &locationCityName, &locationGeoNameID, &proxyEmbedded, &proxyCluster) + &locationCountryCode, &locationCityName, &locationGeoNameID, &proxyEmbedded, &proxyCluster, + &metaEffectiveConnectionMode, &metaEffectiveRelayTimeoutSecs, + &metaEffectiveP2PTimeoutSecs, &metaEffectiveP2PRetryMaxSecs, + &metaSupportedFeatures) if err == nil { if lastLogin.Valid { @@ -1846,6 +1915,22 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee if connIP != nil { _ = json.Unmarshal(connIP, &p.Location.ConnectionIP) } + // Phase-3.7i (#5989) effective-mode + capabilities. 
+ if metaEffectiveConnectionMode.Valid { + p.Meta.EffectiveConnectionMode = metaEffectiveConnectionMode.String + } + if metaEffectiveRelayTimeoutSecs.Valid { + p.Meta.EffectiveRelayTimeoutSecs = uint32(metaEffectiveRelayTimeoutSecs.Int64) + } + if metaEffectiveP2PTimeoutSecs.Valid { + p.Meta.EffectiveP2PTimeoutSecs = uint32(metaEffectiveP2PTimeoutSecs.Int64) + } + if metaEffectiveP2PRetryMaxSecs.Valid { + p.Meta.EffectiveP2PRetryMaxSecs = uint32(metaEffectiveP2PRetryMaxSecs.Int64) + } + if metaSupportedFeatures != nil { + _ = json.Unmarshal(metaSupportedFeatures, &p.Meta.SupportedFeatures) + } } return p, err }) diff --git a/management/server/types/settings.go b/management/server/types/settings.go index 4ea79ec72fc..fc0f6e91fba 100644 --- a/management/server/types/settings.go +++ b/management/server/types/settings.go @@ -58,6 +58,50 @@ type Settings struct { // LazyConnectionEnabled indicates if the experimental feature is enabled or disabled LazyConnectionEnabled bool `gorm:"default:false"` + // ConnectionMode is the account-wide default connection mode (Phase 1 + // of issue #5989). Nullable: NULL means "fall back to LazyConnectionEnabled". + // Stored as the canonical lower-kebab-case string (e.g. "p2p-lazy"). + ConnectionMode *string `gorm:"type:varchar(32);default:null"` + + // RelayTimeoutSeconds, when non-NULL, overrides the built-in default + // (5 min). 0 = "never tear down". Nullable to distinguish "use default" + // from "explicit 0". + RelayTimeoutSeconds *uint32 `gorm:"default:null"` + + // P2pTimeoutSeconds is reserved for Phase 2; same nullable semantics. + // Built-in default in Phase 1: 180 min, but not yet effective. + P2pTimeoutSeconds *uint32 `gorm:"default:null"` + + // P2pRetryMaxSeconds is reserved for Phase 3 (#5989). Caps the ICE- + // failure backoff sequence in p2p-dynamic mode. NULL = use daemon's + // built-in default (900s = 15 min). 
0 = disable backoff (treated + // internally as "user-explicit-disable" via uint32-max sentinel on + // the wire). + P2pRetryMaxSeconds *uint32 `gorm:"default:null"` + + // LegacyLazyFallbackEnabled (Phase 3.7i, #5989) controls whether the + // management server downgrades clients that do NOT advertise the + // "p2p_dynamic" capability to p2p-lazy when the account is in + // p2p-dynamic mode. Defaults to true so that pre-3.7i clients keep + // behaving sanely after an admin flips ConnectionMode to p2p-dynamic. + // Set to false to send the raw p2p-dynamic config to all clients + // (advanced; only useful when you know the entire fleet is upgraded). + // No effect outside p2p-dynamic mode. + // + // Use ApplyLegacyLazyFallbackDefaults to seed the field correctly + // in places that build a fresh Settings from scratch (PUT handler, + // account creation, in-memory FileStore migration). The GORM + // `default:true` only fires for SQL inserts. + LegacyLazyFallbackEnabled bool `gorm:"default:true"` + + // LegacyLazyFallbackTimeoutSeconds (Phase 3.7i, #5989) is the relay + // inactivity timeout sent to legacy clients via the lazy-fallback + // branch. Default 3600s (60 min) - long enough to not hammer + // connection setup on flaky LTE links, short enough to actually + // release idle peers. Must be in [60, 86400]; validated by the + // HTTP API handler. 
+ LegacyLazyFallbackTimeoutSeconds uint32 `gorm:"default:3600"` + // AutoUpdateVersion client auto-update version AutoUpdateVersion string `gorm:"default:'disabled'"` @@ -91,7 +135,13 @@ func (s *Settings) Copy() *Settings { RoutingPeerDNSResolutionEnabled: s.RoutingPeerDNSResolutionEnabled, PeerExposeEnabled: s.PeerExposeEnabled, PeerExposeGroups: slices.Clone(s.PeerExposeGroups), - LazyConnectionEnabled: s.LazyConnectionEnabled, + LazyConnectionEnabled: s.LazyConnectionEnabled, + ConnectionMode: cloneStringPtr(s.ConnectionMode), + RelayTimeoutSeconds: cloneUint32Ptr(s.RelayTimeoutSeconds), + P2pTimeoutSeconds: cloneUint32Ptr(s.P2pTimeoutSeconds), + P2pRetryMaxSeconds: cloneUint32Ptr(s.P2pRetryMaxSeconds), + LegacyLazyFallbackEnabled: s.LegacyLazyFallbackEnabled, + LegacyLazyFallbackTimeoutSeconds: s.LegacyLazyFallbackTimeoutSeconds, DNSDomain: s.DNSDomain, NetworkRange: s.NetworkRange, AutoUpdateVersion: s.AutoUpdateVersion, @@ -138,3 +188,83 @@ func (e *ExtraSettings) Copy() *ExtraSettings { FlowDnsCollectionEnabled: e.FlowDnsCollectionEnabled, } } + +// cloneStringPtr returns a deep copy of a *string (nil-safe). Used by +// Settings.Copy for the new nullable ConnectionMode field. +func cloneStringPtr(p *string) *string { + if p == nil { + return nil + } + v := *p + return &v +} + +// cloneUint32Ptr returns a deep copy of a *uint32 (nil-safe). Used by +// Settings.Copy for the new nullable timeout fields. +func cloneUint32Ptr(p *uint32) *uint32 { + if p == nil { + return nil + } + v := *p + return &v +} + +// StringPtrEqual nil-safe equality for *string. Used to detect changes +// in nullable settings fields when deciding whether to push updated +// PeerConfig to live clients (account.updateAccountPeers). +func StringPtrEqual(a, b *string) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// Uint32PtrEqual nil-safe equality for *uint32. 
Same purpose as +// StringPtrEqual for the new timeout fields. +func Uint32PtrEqual(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// Phase 3.7i (#5989): canonical defaults for the LegacyLazyFallback* +// fields. Centralised so every code path that builds a Settings from +// scratch lands on the same numbers. The GORM `default:` tags only +// apply at INSERT time, so callers that mutate Settings in memory +// (PUT handler, account creation, FileStore migration) must call +// ApplyLegacyLazyFallbackDefaults explicitly. +const ( + DefaultLegacyLazyFallbackEnabled = true + DefaultLegacyLazyFallbackTimeoutSeconds = uint32(3600) +) + +// ApplyLegacyLazyFallbackDefaults seeds the two LegacyLazyFallback* +// fields if they are at the Go zero value. Idempotent — calling it on +// an already-populated Settings is a no-op. The "is at zero value" +// detection is intentionally simple: there is no semantic difference +// between "user explicitly turned the toggle off / set timeout to 0" +// and "field uninitialised", because we forbid 0 timeouts at the API +// layer (range [60, 86400]) and the false toggle case is preserved +// only when the field was already true and got copied verbatim. New +// codepaths that need to remember "user opted out" should use the API +// handler's path (which only ever sees the wire field). +func (s *Settings) ApplyLegacyLazyFallbackDefaults() { + if s == nil { + return + } + // timeout==0 is never valid, so we always rewrite. Toggle: only + // reset to default true when the timeout was also zero (= field + // freshly built, never touched), otherwise honour the explicit + // false the caller put there. 
+ if s.LegacyLazyFallbackTimeoutSeconds == 0 { + s.LegacyLazyFallbackEnabled = DefaultLegacyLazyFallbackEnabled + s.LegacyLazyFallbackTimeoutSeconds = DefaultLegacyLazyFallbackTimeoutSeconds + } +} diff --git a/management/server/types/settings_ptr_equal_test.go b/management/server/types/settings_ptr_equal_test.go new file mode 100644 index 00000000000..19a8fb1a40f --- /dev/null +++ b/management/server/types/settings_ptr_equal_test.go @@ -0,0 +1,92 @@ +package types + +import "testing" + +// Codex review: account.go:332 updateAccountPeers used to silently +// miss ConnectionMode + timeout-field changes. The fix relies on +// these nil-safe equality helpers — make sure they cover the cases. +func TestStringPtrEqual(t *testing.T) { + a := "p2p-dynamic" + b := "p2p-dynamic" + c := "p2p-lazy" + tests := []struct { + name string + x *string + y *string + want bool + }{ + {"both_nil", nil, nil, true}, + {"first_nil", nil, &a, false}, + {"second_nil", &a, nil, false}, + {"same_value", &a, &b, true}, + {"different_value", &a, &c, false}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if got := StringPtrEqual(tc.x, tc.y); got != tc.want { + t.Errorf("StringPtrEqual(%v, %v) = %v, want %v", deref(tc.x), deref(tc.y), got, tc.want) + } + }) + } +} + +func TestUint32PtrEqual(t *testing.T) { + a := uint32(86400) + b := uint32(86400) + c := uint32(300) + tests := []struct { + name string + x *uint32 + y *uint32 + want bool + }{ + {"both_nil", nil, nil, true}, + {"first_nil", nil, &a, false}, + {"second_nil", &a, nil, false}, + {"same_value", &a, &b, true}, + {"different_value", &a, &c, false}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if got := Uint32PtrEqual(tc.x, tc.y); got != tc.want { + t.Errorf("Uint32PtrEqual(%v, %v) = %v, want %v", derefU(tc.x), derefU(tc.y), got, tc.want) + } + }) + } +} + +// Real-world scenario lifted from the bug Codex caught: dashboard +// switches an account from p2p-lazy to p2p-dynamic while 
leaving +// every other setting alone. Settings.Copy must produce an updated object +// that compares unequal on ConnectionMode (the trigger for the +// updateAccountPeers push) and equal on everything else. +func TestSettings_PushTriggerOnConnectionModeFlip(t *testing.T) { + lazy := "p2p-lazy" + dynamic := "p2p-dynamic" + relayTO := uint32(86400) + old := &Settings{ + ConnectionMode: &lazy, + RelayTimeoutSeconds: &relayTO, + } + updated := old.Copy() + updated.ConnectionMode = &dynamic + if StringPtrEqual(old.ConnectionMode, updated.ConnectionMode) { + t.Fatal("ConnectionMode flip must NOT be equal (otherwise updateAccountPeers won't fire)") + } + if !Uint32PtrEqual(old.RelayTimeoutSeconds, updated.RelayTimeoutSeconds) { + t.Fatal("RelayTimeoutSeconds unchanged must remain equal across Copy()") + } +} + +func deref(p *string) string { + if p == nil { + return "" + } + return *p +} +func derefU(p *uint32) any { + if p == nil { + return "" + } + return *p +} diff --git a/management/server/types/settings_test.go b/management/server/types/settings_test.go new file mode 100644 index 00000000000..0e8c8cf7220 --- /dev/null +++ b/management/server/types/settings_test.go @@ -0,0 +1,39 @@ +package types + +import "testing" + +func TestSettings_Copy_P2pRetryMaxSeconds(t *testing.T) { + v := uint32(900) + src := &Settings{P2pRetryMaxSeconds: &v} + dst := src.Copy() + if dst.P2pRetryMaxSeconds == nil { + t.Fatal("Copy lost P2pRetryMaxSeconds pointer") + } + if *dst.P2pRetryMaxSeconds != 900 { + t.Fatalf("expected 900, got %d", *dst.P2pRetryMaxSeconds) + } + // Verify it's a deep copy (different pointers) + *dst.P2pRetryMaxSeconds = 600 + if *src.P2pRetryMaxSeconds != 900 { + t.Fatal("Copy did not deep-clone P2pRetryMaxSeconds") + } +} + +// Phase 3.7i (#5989): make sure Settings.Copy carries the new +// LegacyLazyFallback* fields. Forgetting either would silently reset +// the toggle / timeout to zero values whenever Copy is called (e.g. 
+// in the equality check on UpdateAccountSettings). +func TestSettings_Copy_LegacyLazyFallback(t *testing.T) { + src := &Settings{ + LegacyLazyFallbackEnabled: true, + LegacyLazyFallbackTimeoutSeconds: 1800, + } + dst := src.Copy() + if !dst.LegacyLazyFallbackEnabled { + t.Fatal("Copy lost LegacyLazyFallbackEnabled (got false, want true)") + } + if dst.LegacyLazyFallbackTimeoutSeconds != 1800 { + t.Fatalf("Copy lost LegacyLazyFallbackTimeoutSeconds: got %d, want 1800", + dst.LegacyLazyFallbackTimeoutSeconds) + } +} diff --git a/shared/connectionmode/mode.go b/shared/connectionmode/mode.go new file mode 100644 index 00000000000..d3b1c9e14e4 --- /dev/null +++ b/shared/connectionmode/mode.go @@ -0,0 +1,128 @@ +// Package connectionmode defines the Mode type used to control how a peer +// establishes connections to other peers. Introduced in Phase 1 of the +// connection-mode consolidation (issue #5989) to replace the historical +// pair (NB_FORCE_RELAY, NB_ENABLE_EXPERIMENTAL_LAZY_CONN). +package connectionmode + +import ( + "fmt" + "strings" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Mode is a connection mode for peer-to-peer (or relay-only) connections. +// ModeUnspecified is the zero value and indicates "fall back to the next +// resolution source" (env -> config -> server-pushed -> legacy bool). +type Mode int + +const ( + ModeUnspecified Mode = iota + ModeRelayForced + ModeP2P + ModeP2PLazy + ModeP2PDynamic + // ModeFollowServer is a client-side sentinel: setting this in the + // client config explicitly clears any local override so the + // server-pushed value (or its legacy fallback) is used. It MUST NOT + // be sent on the wire -- ToProto returns UNSPECIFIED for it. + ModeFollowServer +) + +// String returns the canonical lower-kebab-case name of the mode. 
+func (m Mode) String() string { + switch m { + case ModeRelayForced: + return "relay-forced" + case ModeP2P: + return "p2p" + case ModeP2PLazy: + return "p2p-lazy" + case ModeP2PDynamic: + return "p2p-dynamic" + case ModeFollowServer: + return "follow-server" + default: + return "" + } +} + +// ParseString accepts the canonical name (case-insensitive, surrounding +// whitespace tolerated) and returns the corresponding Mode. Empty input +// returns ModeUnspecified with no error. Unknown input returns +// ModeUnspecified with an error. +func ParseString(s string) (Mode, error) { + switch strings.ToLower(strings.TrimSpace(s)) { + case "": + return ModeUnspecified, nil + case "relay-forced": + return ModeRelayForced, nil + case "p2p": + return ModeP2P, nil + case "p2p-lazy": + return ModeP2PLazy, nil + case "p2p-dynamic": + return ModeP2PDynamic, nil + case "follow-server": + return ModeFollowServer, nil + default: + return ModeUnspecified, fmt.Errorf("unknown connection mode %q", s) + } +} + +// FromProto translates a proto enum value to the internal Mode. +func FromProto(m mgmProto.ConnectionMode) Mode { + switch m { + case mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED: + return ModeRelayForced + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P: + return ModeP2P + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY: + return ModeP2PLazy + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC: + return ModeP2PDynamic + default: + return ModeUnspecified + } +} + +// ToProto translates the internal Mode to a proto enum value. +// ModeFollowServer is a client-side concept and intentionally maps to +// UNSPECIFIED so it never appears on the wire. 
+func (m Mode) ToProto() mgmProto.ConnectionMode { + switch m { + case ModeRelayForced: + return mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED + case ModeP2P: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P + case ModeP2PLazy: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY + case ModeP2PDynamic: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC + default: + return mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED + } +} + +// ResolveLegacyLazyBool maps the historical Settings.LazyConnectionEnabled +// boolean to the new Mode. Used when a new client receives an old server's +// PeerConfig (ConnectionMode = UNSPECIFIED) or when the management server +// has no explicit Settings.ConnectionMode set yet. +func ResolveLegacyLazyBool(lazy bool) Mode { + if lazy { + return ModeP2PLazy + } + return ModeP2P +} + +// ToLazyConnectionEnabled is the inverse mapping for backwards-compat. +// Used by toPeerConfig() so old clients (which only know the boolean) +// still get a sensible behaviour. +// +// Note: ModeRelayForced cannot be expressed via the legacy boolean and +// falls back to false. This is a structural compat gap documented in the +// release notes; admins must set NB_FORCE_RELAY=true on old clients +// or upgrade them. 
+func (m Mode) ToLazyConnectionEnabled() bool { + return m == ModeP2PLazy +} diff --git a/shared/connectionmode/mode_test.go b/shared/connectionmode/mode_test.go new file mode 100644 index 00000000000..01a9c11c929 --- /dev/null +++ b/shared/connectionmode/mode_test.go @@ -0,0 +1,106 @@ +package connectionmode + +import ( + "testing" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestParseString(t *testing.T) { + cases := []struct { + input string + want Mode + wantErr bool + }{ + {"relay-forced", ModeRelayForced, false}, + {"p2p", ModeP2P, false}, + {"p2p-lazy", ModeP2PLazy, false}, + {"p2p-dynamic", ModeP2PDynamic, false}, + {"follow-server", ModeFollowServer, false}, + {"", ModeUnspecified, false}, + {"P2P", ModeP2P, false}, + {" p2p-lazy ", ModeP2PLazy, false}, + {"junk", ModeUnspecified, true}, + } + for _, c := range cases { + got, err := ParseString(c.input) + if (err != nil) != c.wantErr { + t.Errorf("ParseString(%q): err=%v wantErr=%v", c.input, err, c.wantErr) + continue + } + if got != c.want { + t.Errorf("ParseString(%q) = %v, want %v", c.input, got, c.want) + } + } +} + +func TestFromProto(t *testing.T) { + cases := []struct { + input mgmProto.ConnectionMode + want Mode + }{ + {mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED, ModeUnspecified}, + {mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, ModeRelayForced}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P, ModeP2P}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, ModeP2PLazy}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, ModeP2PDynamic}, + } + for _, c := range cases { + got := FromProto(c.input) + if got != c.want { + t.Errorf("FromProto(%v) = %v, want %v", c.input, got, c.want) + } + } +} + +func TestToProto(t *testing.T) { + for _, m := range []Mode{ModeUnspecified, ModeRelayForced, ModeP2P, ModeP2PLazy, ModeP2PDynamic} { + got := FromProto(m.ToProto()) + if got != m { + t.Errorf("round-trip Mode %v -> proto -> Mode = %v", m, got) + } + } + 
if got := ModeFollowServer.ToProto(); got != mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED { + t.Errorf("ModeFollowServer.ToProto() = %v, want UNSPECIFIED", got) + } +} + +func TestResolveLegacyLazyBool(t *testing.T) { + if got := ResolveLegacyLazyBool(true); got != ModeP2PLazy { + t.Errorf("ResolveLegacyLazyBool(true) = %v, want ModeP2PLazy", got) + } + if got := ResolveLegacyLazyBool(false); got != ModeP2P { + t.Errorf("ResolveLegacyLazyBool(false) = %v, want ModeP2P", got) + } +} + +func TestToLazyConnectionEnabled(t *testing.T) { + cases := []struct { + mode Mode + want bool + }{ + {ModeRelayForced, false}, + {ModeP2P, false}, + {ModeP2PLazy, true}, + {ModeP2PDynamic, false}, + {ModeUnspecified, false}, + } + for _, c := range cases { + got := c.mode.ToLazyConnectionEnabled() + if got != c.want { + t.Errorf("Mode %v ToLazyConnectionEnabled() = %v, want %v", c.mode, got, c.want) + } + } +} + +func TestStringRoundTrip(t *testing.T) { + for _, m := range []Mode{ModeRelayForced, ModeP2P, ModeP2PLazy, ModeP2PDynamic, ModeFollowServer} { + got, err := ParseString(m.String()) + if err != nil { + t.Errorf("round-trip parse of %v.String() failed: %v", m, err) + } + if got != m { + t.Errorf("round-trip %v -> %q -> %v", m, m.String(), got) + } + } +} diff --git a/shared/management/client/client.go b/shared/management/client/client.go index 18efba87b87..e9354d1e521 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -27,6 +27,17 @@ type Client interface { // Used to validate connectivity before committing configuration changes. HealthCheck() error SyncMeta(sysInfo *system.Info) error + // SyncPeerConnections sends the peer's current per-peer connection map + // to the management server as a unary RPC. Phase 3.7i of #5989. 
+ SyncPeerConnections(ctx context.Context, m *proto.PeerConnectionMap) error + // SetEffectiveConnConfig records the engine-resolved connection mode/ + // timeouts to report in subsequent Sync/Login/SyncMeta PeerSystemMeta. + // Phase 3.7i of #5989. + SetEffectiveConnConfig(eff EffectiveConnConfig) + // SetSnapshotRequestHandler registers a callback invoked when the + // management server sends a SnapshotRequest over the Sync server-stream. + // Phase 3.7i of #5989. + SetSnapshotRequestHandler(fn func(nonce uint64)) Logout() error CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) RenewExpose(ctx context.Context, domain string) error diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index a8e8172dc88..05b684eaf31 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -17,6 +17,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/management-integrations/integrations" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" @@ -138,7 +139,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil, nil, peer_connections.NewMemoryStore(time.Hour), peer_connections.NewSnapshotRouter()) if err != nil { t.Fatal(err) } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 80625fe06c3..a24892c5104 100644 --- a/shared/management/client/grpc.go +++ 
b/shared/management/client/grpc.go @@ -55,6 +55,13 @@ type GrpcClient struct { connStateCallback ConnStateNotifier connStateCallbackLock sync.RWMutex serverURL string + + effMu sync.RWMutex + effective EffectiveConnConfig + + // Phase 3.7i (#5989): handler for server-pushed SnapshotRequests. + snapMu sync.Mutex + onSnapshotRequest func(nonce uint64) } type ExposeRequest struct { @@ -435,7 +442,7 @@ func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, err } func (c *GrpcClient) connectToSyncStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info) (proto.ManagementService_SyncClient, error) { - req := &proto.SyncRequest{Meta: infoToMetaData(sysInfo)} + req := &proto.SyncRequest{Meta: infoToMetaData(sysInfo, c.effectiveConnConfig())} myPrivateKey := c.key myPublicKey := myPrivateKey.PublicKey() @@ -473,6 +480,15 @@ func (c *GrpcClient) receiveUpdatesEvents(stream proto.ManagementService_SyncCli return err } + if req := decryptedResp.GetSnapshotRequest(); req != nil { + c.snapMu.Lock() + cb := c.onSnapshotRequest + c.snapMu.Unlock() + if cb != nil { + cb(req.GetNonce()) + } + } + if err := msgHandler(decryptedResp); err != nil { log.Errorf("failed handling an update message received from Management Service: %v", err.Error()) } @@ -595,7 +611,7 @@ func (c *GrpcClient) Register(setupKey string, jwtToken string, sysInfo *system. SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(&proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo, c.effectiveConnConfig()), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // Login attempts login to Management Server. Takes care of encrypting and decrypting messages. 
@@ -604,7 +620,7 @@ func (c *GrpcClient) Login(sysInfo *system.Info, pubSSHKey []byte, dnsLabels dom SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(&proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{Meta: infoToMetaData(sysInfo, c.effectiveConnConfig()), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // GetDeviceAuthorizationFlow returns a device authorization flow information. @@ -700,7 +716,7 @@ func (c *GrpcClient) SyncMeta(sysInfo *system.Info) error { return err } - syncMetaReq, err := encryption.EncryptMessage(*serverPubKey, c.key, &proto.SyncMetaRequest{Meta: infoToMetaData(sysInfo)}) + syncMetaReq, err := encryption.EncryptMessage(*serverPubKey, c.key, &proto.SyncMetaRequest{Meta: infoToMetaData(sysInfo, c.effectiveConnConfig())}) if err != nil { log.Errorf("failed to encrypt message: %s", err) return err @@ -716,6 +732,36 @@ func (c *GrpcClient) SyncMeta(sysInfo *system.Info) error { return err } +// SyncPeerConnections is the GrpcClient implementation. Phase 3.7i of +// #5989. Mirrors SyncMeta: fetches server pubkey, encrypts the +// PeerConnectionMap with the peer's wg key, calls the new unary RPC. 
+func (c *GrpcClient) SyncPeerConnections(ctx context.Context, m *proto.PeerConnectionMap) error { + if !c.ready() { + return errors.New(errMsgNoMgmtConnection) + } + + serverPubKey, err := c.getServerPublicKey() + if err != nil { + log.Debugf(errMsgMgmtPublicKey, err) + return err + } + + encrypted, err := encryption.EncryptMessage(*serverPubKey, c.key, m) + if err != nil { + log.Errorf("encrypt PeerConnectionMap: %s", err) + return err + } + + mgmCtx, cancel := context.WithTimeout(ctx, ConnectTimeout) + defer cancel() + + _, err = c.realClient.SyncPeerConnections(mgmCtx, &proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encrypted, + }) + return err +} + func (c *GrpcClient) notifyDisconnected(err error) { c.connStateCallbackLock.RLock() defer c.connStateCallbackLock.RUnlock() @@ -883,7 +929,43 @@ func toProtoExposeServiceRequest(req ExposeRequest) (*proto.ExposeServiceRequest }, nil } -func infoToMetaData(info *system.Info) *proto.PeerSystemMeta { +// EffectiveConnConfig captures the peer-engine-resolved connection mode +// + timeouts that should be reported to mgmt alongside system info. +// Phase 3.7i of #5989. +type EffectiveConnConfig struct { + Mode string + RelayTimeoutSecs uint32 + P2PTimeoutSecs uint32 + P2PRetryMaxSecs uint32 +} + +// effectiveConnConfig pulls the engine-resolved connection mode/timeouts +// to report in PeerSystemMeta. Empty when the engine has not registered +// itself with the client (early startup / standalone mock). Phase 3.7i. +func (c *GrpcClient) effectiveConnConfig() EffectiveConnConfig { + c.effMu.RLock() + defer c.effMu.RUnlock() + return c.effective +} + +// SetEffectiveConnConfig is called by the engine each time the resolved +// mode changes (typically once per NetworkMap update). Phase 3.7i. 
+func (c *GrpcClient) SetEffectiveConnConfig(eff EffectiveConnConfig) { + c.effMu.Lock() + defer c.effMu.Unlock() + c.effective = eff +} + +// SetSnapshotRequestHandler registers a callback invoked when the +// management server sends a SnapshotRequest over the Sync server-stream. +// Phase 3.7i of #5989. +func (c *GrpcClient) SetSnapshotRequestHandler(fn func(nonce uint64)) { + c.snapMu.Lock() + c.onSnapshotRequest = fn + c.snapMu.Unlock() +} + +func infoToMetaData(info *system.Info, eff EffectiveConnConfig) *proto.PeerSystemMeta { if info == nil { return nil } @@ -940,5 +1022,16 @@ func infoToMetaData(info *system.Info) *proto.PeerSystemMeta { LazyConnectionEnabled: info.LazyConnectionEnabled, }, + + EffectiveConnectionMode: eff.Mode, + EffectiveRelayTimeoutSecs: eff.RelayTimeoutSecs, + EffectiveP2PTimeoutSecs: eff.P2PTimeoutSecs, + EffectiveP2PRetryMaxSecs: eff.P2PRetryMaxSecs, + + // Phase 3.7i (#5989): advertise capabilities so mgmt can decide + // whether legacy-compat fallbacks (e.g. p2p-dynamic -> p2p-lazy) + // need to apply for this client. Source of truth: + // client/system/features.go. 
+ SupportedFeatures: system.SupportedFeatures(), } } diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index 361e8ffadfc..5f6fa8dca81 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -19,7 +19,10 @@ type MockClient struct { GetServerURLFunc func() string HealthCheckFunc func() error SyncMetaFunc func(sysInfo *system.Info) error - LogoutFunc func() error + SyncPeerConnectionsFunc func(ctx context.Context, m *proto.PeerConnectionMap) error + SetEffectiveConnConfigFunc func(eff EffectiveConnConfig) + SetSnapshotRequestHandlerFunc func(fn func(nonce uint64)) + LogoutFunc func() error JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error CreateExposeFunc func(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) RenewExposeFunc func(ctx context.Context, domain string) error @@ -106,6 +109,25 @@ func (m *MockClient) SyncMeta(sysInfo *system.Info) error { return m.SyncMetaFunc(sysInfo) } +func (m *MockClient) SyncPeerConnections(ctx context.Context, pcm *proto.PeerConnectionMap) error { + if m.SyncPeerConnectionsFunc != nil { + return m.SyncPeerConnectionsFunc(ctx, pcm) + } + return nil +} + +func (m *MockClient) SetEffectiveConnConfig(eff EffectiveConnConfig) { + if m.SetEffectiveConnConfigFunc != nil { + m.SetEffectiveConnConfigFunc(eff) + } +} + +func (m *MockClient) SetSnapshotRequestHandler(fn func(nonce uint64)) { + if m.SetSnapshotRequestHandlerFunc != nil { + m.SetSnapshotRequestHandlerFunc(fn) + } +} + func (m *MockClient) Logout() error { if m.LogoutFunc == nil { return nil diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 327e2061425..64b341138a3 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -359,6 +359,76 @@ components: description: Enables or disables experimental lazy connection type: boolean example: true + connection_mode: + 
x-experimental: true + type: string + enum: [relay-forced, p2p, p2p-lazy, p2p-dynamic] + nullable: true + description: | + Account-wide default peer-connection mode. NULL means + "fall back to lazy_connection_enabled" for backwards compatibility. + Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are + functional. p2p-dynamic is reserved (passes through as p2p in + Phase 1; will become functional in Phase 2). + p2p_timeout_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Default ICE-worker idle timeout in seconds. 0 = never tear down. + Effective only in p2p-dynamic mode (added in Phase 2). + NULL means "use built-in default" (180 minutes). + p2p_retry_max_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Maximum interval between P2P retry attempts after consecutive + ICE failures, in seconds. Default 900 (= 15 min). Set to 0 to + disable backoff (always retry immediately, Phase-2 behavior). + Effective only in p2p-dynamic mode (added in Phase 3). + example: 900 + relay_timeout_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Default relay-worker idle timeout in seconds. 0 = never tear + down. Effective in p2p-lazy and p2p-dynamic modes. Backwards- + compat alias for NB_LAZY_CONN_INACTIVITY_THRESHOLD on the + client. NULL means "use built-in default" (5 minutes). + legacy_lazy_fallback_enabled: + x-experimental: true + type: boolean + description: | + Phase 3.7i (#5989) - Legacy compatibility for older clients. + When the account ConnectionMode is p2p-dynamic and a client + does NOT advertise the "p2p_dynamic" capability via + PeerSystemMeta.SupportedFeatures, the management server + transparently downgrades that client's PeerConfig to + p2p-lazy with the timeout below. Has no effect on clients + that support p2p-dynamic. Default true. 
Disable only if you + are certain the entire fleet is on a 3.7i+ daemon. + example: true + legacy_lazy_fallback_timeout_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 60 + maximum: 86400 + description: | + Phase 3.7i (#5989) - Inactivity timeout sent to legacy + clients via the lazy-fallback branch. Range 60-86400 (1 min + to 24 h); default 3600 (= 60 min). Long enough to not hammer + connection setup on flaky LTE links, short enough to + actually release idle peers. + example: 3600 auto_update_version: description: Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") type: string diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index dc916f81ac9..ed6cda0443c 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1,6 +1,6 @@ // Package api provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.6.0 DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.7.0 DO NOT EDIT. package api import ( @@ -13,8 +13,8 @@ import ( ) const ( - BearerAuthScopes = "BearerAuth.Scopes" - TokenAuthScopes = "TokenAuth.Scopes" + BearerAuthScopes bearerAuthContextKey = "BearerAuth.Scopes" + TokenAuthScopes tokenAuthContextKey = "TokenAuth.Scopes" ) // Defines values for AccessRestrictionsCrowdsecMode. @@ -38,6 +38,30 @@ func (e AccessRestrictionsCrowdsecMode) Valid() bool { } } +// Defines values for AccountSettingsConnectionMode. 
+const ( + AccountSettingsConnectionModeP2p AccountSettingsConnectionMode = "p2p" + AccountSettingsConnectionModeP2pDynamic AccountSettingsConnectionMode = "p2p-dynamic" + AccountSettingsConnectionModeP2pLazy AccountSettingsConnectionMode = "p2p-lazy" + AccountSettingsConnectionModeRelayForced AccountSettingsConnectionMode = "relay-forced" +) + +// Valid indicates whether the value is a known member of the AccountSettingsConnectionMode enum. +func (e AccountSettingsConnectionMode) Valid() bool { + switch e { + case AccountSettingsConnectionModeP2p: + return true + case AccountSettingsConnectionModeP2pDynamic: + return true + case AccountSettingsConnectionModeP2pLazy: + return true + case AccountSettingsConnectionModeRelayForced: + return true + default: + return false + } +} + // Defines values for CreateAzureIntegrationRequestHost. const ( CreateAzureIntegrationRequestHostMicrosoftCom CreateAzureIntegrationRequestHost = "microsoft.com" @@ -511,6 +535,7 @@ func (e GroupMinimumIssued) Valid() bool { // Defines values for IdentityProviderType. const ( + IdentityProviderTypeAdfs IdentityProviderType = "adfs" IdentityProviderTypeEntra IdentityProviderType = "entra" IdentityProviderTypeGoogle IdentityProviderType = "google" IdentityProviderTypeMicrosoft IdentityProviderType = "microsoft" @@ -518,12 +543,13 @@ const ( IdentityProviderTypeOkta IdentityProviderType = "okta" IdentityProviderTypePocketid IdentityProviderType = "pocketid" IdentityProviderTypeZitadel IdentityProviderType = "zitadel" - IdentityProviderTypeAdfs IdentityProviderType = "adfs" ) // Valid indicates whether the value is a known member of the IdentityProviderType enum. 
func (e IdentityProviderType) Valid() bool { switch e { + case IdentityProviderTypeAdfs: + return true case IdentityProviderTypeEntra: return true case IdentityProviderTypeGoogle: @@ -538,8 +564,6 @@ func (e IdentityProviderType) Valid() bool { return true case IdentityProviderTypeZitadel: return true - case IdentityProviderTypeAdfs: - return true default: return false } @@ -1455,6 +1479,13 @@ type AccountSettings struct { // AutoUpdateVersion Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") AutoUpdateVersion *string `json:"auto_update_version,omitempty"` + // ConnectionMode Account-wide default peer-connection mode. NULL means + // "fall back to lazy_connection_enabled" for backwards compatibility. + // Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are + // functional. p2p-dynamic is reserved (passes through as p2p in + // Phase 1; will become functional in Phase 2). + ConnectionMode *AccountSettingsConnectionMode `json:"connection_mode,omitempty"` + // DnsDomain Allows to define a custom dns domain for the account DnsDomain *string `json:"dns_domain,omitempty"` @@ -1477,12 +1508,40 @@ type AccountSettings struct { // LazyConnectionEnabled Enables or disables experimental lazy connection LazyConnectionEnabled *bool `json:"lazy_connection_enabled,omitempty"` + // LegacyLazyFallbackEnabled Phase 3.7i (#5989) - Legacy compatibility for older clients. + // When the account ConnectionMode is p2p-dynamic and a client + // does NOT advertise the "p2p_dynamic" capability via + // PeerSystemMeta.SupportedFeatures, the management server + // transparently downgrades that client's PeerConfig to p2p-lazy + // with the timeout below. Has no effect on clients that support + // p2p-dynamic. Default true. Disable only if you are certain the + // entire fleet is on a 3.7i+ daemon. 
+ LegacyLazyFallbackEnabled *bool `json:"legacy_lazy_fallback_enabled,omitempty"` + + // LegacyLazyFallbackTimeoutSeconds Phase 3.7i (#5989) - Inactivity timeout sent to legacy + // clients via the lazy-fallback branch. Range 60-86400 (1 min + // to 24 h); default 3600 (= 60 min). Long enough to not hammer + // connection setup on flaky LTE links, short enough to actually + // release idle peers. + LegacyLazyFallbackTimeoutSeconds *int64 `json:"legacy_lazy_fallback_timeout_seconds,omitempty"` + // LocalAuthDisabled Indicates whether local (email/password) authentication is disabled. When true, users can only authenticate via external identity providers. This is a read-only field. LocalAuthDisabled *bool `json:"local_auth_disabled,omitempty"` // NetworkRange Allows to define a custom network range for the account in CIDR format NetworkRange *string `json:"network_range,omitempty"` + // P2pRetryMaxSeconds Maximum interval between P2P retry attempts after consecutive + // ICE failures, in seconds. Default 900 (= 15 min). Set to 0 to + // disable backoff (always retry immediately, Phase-2 behavior). + // Effective only in p2p-dynamic mode (added in Phase 3). + P2pRetryMaxSeconds *int64 `json:"p2p_retry_max_seconds,omitempty"` + + // P2pTimeoutSeconds Default ICE-worker idle timeout in seconds. 0 = never tear down. + // Effective only in p2p-dynamic mode (added in Phase 2). + // NULL means "use built-in default" (180 minutes). + P2pTimeoutSeconds *int64 `json:"p2p_timeout_seconds,omitempty"` + // PeerExposeEnabled Enables or disables peer expose. If enabled, peers can expose local services through the reverse proxy using the CLI. PeerExposeEnabled bool `json:"peer_expose_enabled"` @@ -1504,10 +1563,23 @@ type AccountSettings struct { // RegularUsersViewBlocked Allows blocking regular users from viewing parts of the system. RegularUsersViewBlocked bool `json:"regular_users_view_blocked"` + // RelayTimeoutSeconds Default relay-worker idle timeout in seconds. 
0 = never tear + // down. Effective in p2p-lazy and p2p-dynamic modes. Backwards- + // compat alias for NB_LAZY_CONN_INACTIVITY_THRESHOLD on the + // client. NULL means "use built-in default" (5 minutes). + RelayTimeoutSeconds *int64 `json:"relay_timeout_seconds,omitempty"` + // RoutingPeerDnsResolutionEnabled Enables or disables DNS resolution on the routing peers RoutingPeerDnsResolutionEnabled *bool `json:"routing_peer_dns_resolution_enabled,omitempty"` } +// AccountSettingsConnectionMode Account-wide default peer-connection mode. NULL means +// "fall back to lazy_connection_enabled" for backwards compatibility. +// Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are +// functional. p2p-dynamic is reserved (passes through as p2p in +// Phase 1; will become functional in Phase 2). +type AccountSettingsConnectionMode string + // AvailablePorts defines model for AvailablePorts. type AvailablePorts struct { // Tcp Number of available TCP ports left on the ingress peer @@ -1626,7 +1698,9 @@ type Checks struct { // OsVersionCheck Posture check for the version of operating system OsVersionCheck *OSVersionCheck `json:"os_version_check,omitempty"` - // PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. + // PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it + // contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, + // so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. 
PeerNetworkRangeCheck *PeerNetworkRangeCheck `json:"peer_network_range_check,omitempty"` // ProcessCheck Posture Check for binaries exist and are running in the peer’s system @@ -3312,7 +3386,9 @@ type PeerMinimum struct { Name string `json:"name"` } -// PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. +// PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it +// contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, +// so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. type PeerNetworkRangeCheck struct { // Action Action to take upon policy match Action PeerNetworkRangeCheckAction `json:"action"` @@ -4761,6 +4837,12 @@ type ZoneRequest struct { // Conflict Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided. type Conflict = ErrorResponse +// bearerAuthContextKey is the context key for BearerAuth security scheme +type bearerAuthContextKey string + +// tokenAuthContextKey is the context key for TokenAuth security scheme +type tokenAuthContextKey string + // GetApiEventsNetworkTrafficParams defines parameters for GetApiEventsNetworkTraffic. 
type GetApiEventsNetworkTrafficParams struct { // Page Page number diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 604f9c79385..69096c29437 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -71,6 +71,66 @@ func (JobStatus) EnumDescriptor() ([]byte, []int) { return file_management_proto_rawDescGZIP(), []int{0} } +// ConnectionMode controls how a peer establishes connections to other peers. +// Added in Phase 1 of the connection-mode consolidation (see issue #5989). +// CONNECTION_MODE_UNSPECIFIED is the proto default and means "fall back to +// the legacy LazyConnectionEnabled boolean field" -- required for backwards +// compatibility with old management servers that don't set this field. +type ConnectionMode int32 + +const ( + ConnectionMode_CONNECTION_MODE_UNSPECIFIED ConnectionMode = 0 + ConnectionMode_CONNECTION_MODE_RELAY_FORCED ConnectionMode = 1 + ConnectionMode_CONNECTION_MODE_P2P ConnectionMode = 2 + ConnectionMode_CONNECTION_MODE_P2P_LAZY ConnectionMode = 3 + ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC ConnectionMode = 4 +) + +// Enum value maps for ConnectionMode. 
+var ( + ConnectionMode_name = map[int32]string{ + 0: "CONNECTION_MODE_UNSPECIFIED", + 1: "CONNECTION_MODE_RELAY_FORCED", + 2: "CONNECTION_MODE_P2P", + 3: "CONNECTION_MODE_P2P_LAZY", + 4: "CONNECTION_MODE_P2P_DYNAMIC", + } + ConnectionMode_value = map[string]int32{ + "CONNECTION_MODE_UNSPECIFIED": 0, + "CONNECTION_MODE_RELAY_FORCED": 1, + "CONNECTION_MODE_P2P": 2, + "CONNECTION_MODE_P2P_LAZY": 3, + "CONNECTION_MODE_P2P_DYNAMIC": 4, + } +) + +func (x ConnectionMode) Enum() *ConnectionMode { + p := new(ConnectionMode) + *p = x + return p +} + +func (x ConnectionMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnectionMode) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[1].Descriptor() +} + +func (ConnectionMode) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[1] +} + +func (x ConnectionMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnectionMode.Descriptor instead. +func (ConnectionMode) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{1} +} + type RuleProtocol int32 const ( @@ -113,11 +173,11 @@ func (x RuleProtocol) String() string { } func (RuleProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[1].Descriptor() + return file_management_proto_enumTypes[2].Descriptor() } func (RuleProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[1] + return &file_management_proto_enumTypes[2] } func (x RuleProtocol) Number() protoreflect.EnumNumber { @@ -126,7 +186,7 @@ func (x RuleProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use RuleProtocol.Descriptor instead. 
func (RuleProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{1} + return file_management_proto_rawDescGZIP(), []int{2} } type RuleDirection int32 @@ -159,11 +219,11 @@ func (x RuleDirection) String() string { } func (RuleDirection) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[2].Descriptor() + return file_management_proto_enumTypes[3].Descriptor() } func (RuleDirection) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[2] + return &file_management_proto_enumTypes[3] } func (x RuleDirection) Number() protoreflect.EnumNumber { @@ -172,7 +232,7 @@ func (x RuleDirection) Number() protoreflect.EnumNumber { // Deprecated: Use RuleDirection.Descriptor instead. func (RuleDirection) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{2} + return file_management_proto_rawDescGZIP(), []int{3} } type RuleAction int32 @@ -205,11 +265,11 @@ func (x RuleAction) String() string { } func (RuleAction) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[3].Descriptor() + return file_management_proto_enumTypes[4].Descriptor() } func (RuleAction) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[3] + return &file_management_proto_enumTypes[4] } func (x RuleAction) Number() protoreflect.EnumNumber { @@ -218,7 +278,7 @@ func (x RuleAction) Number() protoreflect.EnumNumber { // Deprecated: Use RuleAction.Descriptor instead. 
func (RuleAction) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{3} + return file_management_proto_rawDescGZIP(), []int{4} } type ExposeProtocol int32 @@ -260,11 +320,11 @@ func (x ExposeProtocol) String() string { } func (ExposeProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[4].Descriptor() + return file_management_proto_enumTypes[5].Descriptor() } func (ExposeProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[4] + return &file_management_proto_enumTypes[5] } func (x ExposeProtocol) Number() protoreflect.EnumNumber { @@ -273,7 +333,62 @@ func (x ExposeProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use ExposeProtocol.Descriptor instead. func (ExposeProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{4} + return file_management_proto_rawDescGZIP(), []int{5} +} + +type ConnType int32 + +const ( + ConnType_CONN_TYPE_UNSPECIFIED ConnType = 0 + ConnType_CONN_TYPE_IDLE ConnType = 1 + ConnType_CONN_TYPE_CONNECTING ConnType = 2 + ConnType_CONN_TYPE_P2P ConnType = 3 + ConnType_CONN_TYPE_RELAYED ConnType = 4 +) + +// Enum value maps for ConnType. 
+var ( + ConnType_name = map[int32]string{ + 0: "CONN_TYPE_UNSPECIFIED", + 1: "CONN_TYPE_IDLE", + 2: "CONN_TYPE_CONNECTING", + 3: "CONN_TYPE_P2P", + 4: "CONN_TYPE_RELAYED", + } + ConnType_value = map[string]int32{ + "CONN_TYPE_UNSPECIFIED": 0, + "CONN_TYPE_IDLE": 1, + "CONN_TYPE_CONNECTING": 2, + "CONN_TYPE_P2P": 3, + "CONN_TYPE_RELAYED": 4, + } +) + +func (x ConnType) Enum() *ConnType { + p := new(ConnType) + *p = x + return p +} + +func (x ConnType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnType) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[6].Descriptor() +} + +func (ConnType) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[6] +} + +func (x ConnType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnType.Descriptor instead. +func (ConnType) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{6} } type HostConfig_Protocol int32 @@ -315,11 +430,11 @@ func (x HostConfig_Protocol) String() string { } func (HostConfig_Protocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[5].Descriptor() + return file_management_proto_enumTypes[7].Descriptor() } func (HostConfig_Protocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[5] + return &file_management_proto_enumTypes[7] } func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { @@ -358,11 +473,11 @@ func (x DeviceAuthorizationFlowProvider) String() string { } func (DeviceAuthorizationFlowProvider) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[6].Descriptor() + return file_management_proto_enumTypes[8].Descriptor() } func (DeviceAuthorizationFlowProvider) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[6] + return &file_management_proto_enumTypes[8] } func (x 
DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { @@ -790,6 +905,10 @@ type SyncResponse struct { NetworkMap *NetworkMap `protobuf:"bytes,5,opt,name=NetworkMap,proto3" json:"NetworkMap,omitempty"` // Posture checks to be evaluated by client Checks []*Checks `protobuf:"bytes,6,rep,name=Checks,proto3" json:"Checks,omitempty"` + // Phase 3.7i (#5989): on-demand refresh request for the peer's + // connection map. Peer responds via SyncPeerConnections RPC with + // in_response_to_nonce echoing this nonce. + SnapshotRequest *PeerSnapshotRequest `protobuf:"bytes,7,opt,name=snapshot_request,json=snapshotRequest,proto3" json:"snapshot_request,omitempty"` } func (x *SyncResponse) Reset() { @@ -866,6 +985,13 @@ func (x *SyncResponse) GetChecks() []*Checks { return nil } +func (x *SyncResponse) GetSnapshotRequest() *PeerSnapshotRequest { + if x != nil { + return x.SnapshotRequest + } + return nil +} + type SyncMetaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1363,6 +1489,21 @@ type PeerSystemMeta struct { Environment *Environment `protobuf:"bytes,15,opt,name=environment,proto3" json:"environment,omitempty"` Files []*File `protobuf:"bytes,16,rep,name=files,proto3" json:"files,omitempty"` Flags *Flags `protobuf:"bytes,17,opt,name=flags,proto3" json:"flags,omitempty"` + // Phase 3.7i (#5989): connection mode/timeouts this peer is actually + // running with. Mgmt copies into RemotePeerConfig of every other peer. 
+ EffectiveConnectionMode string `protobuf:"bytes,50,opt,name=effective_connection_mode,json=effectiveConnectionMode,proto3" json:"effective_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `protobuf:"varint,51,opt,name=effective_relay_timeout_secs,json=effectiveRelayTimeoutSecs,proto3" json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `protobuf:"varint,52,opt,name=effective_p2p_timeout_secs,json=effectiveP2pTimeoutSecs,proto3" json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `protobuf:"varint,53,opt,name=effective_p2p_retry_max_secs,json=effectiveP2pRetryMaxSecs,proto3" json:"effective_p2p_retry_max_secs,omitempty"` + // Phase 3.7i (#5989): list of capability keywords this client build + // supports. Old clients leave this empty (proto3 default for repeated + // fields). The management server uses this list to decide whether to + // fall back to legacy settings for clients that do not yet implement a + // feature - e.g. when ConnectionMode is p2p-dynamic and the client does + // not advertise "p2p_dynamic", mgmt downgrades it to p2p-lazy with the + // admin-configured legacy timeout. See client/system/features.go for + // the source of truth on which keywords this client build advertises. 
+ SupportedFeatures []string `protobuf:"bytes,60,rep,name=supported_features,json=supportedFeatures,proto3" json:"supported_features,omitempty"` } func (x *PeerSystemMeta) Reset() { @@ -1516,6 +1657,41 @@ func (x *PeerSystemMeta) GetFlags() *Flags { return nil } +func (x *PeerSystemMeta) GetEffectiveConnectionMode() string { + if x != nil { + return x.EffectiveConnectionMode + } + return "" +} + +func (x *PeerSystemMeta) GetEffectiveRelayTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveRelayTimeoutSecs + } + return 0 +} + +func (x *PeerSystemMeta) GetEffectiveP2PTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveP2PTimeoutSecs + } + return 0 +} + +func (x *PeerSystemMeta) GetEffectiveP2PRetryMaxSecs() uint32 { + if x != nil { + return x.EffectiveP2PRetryMaxSecs + } + return 0 +} + +func (x *PeerSystemMeta) GetSupportedFeatures() []string { + if x != nil { + return x.SupportedFeatures + } + return nil +} + type LoginResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2163,6 +2339,27 @@ type PeerConfig struct { Mtu int32 `protobuf:"varint,7,opt,name=mtu,proto3" json:"mtu,omitempty"` // Auto-update config AutoUpdate *AutoUpdateSettings `protobuf:"bytes,8,opt,name=autoUpdate,proto3" json:"autoUpdate,omitempty"` + // Connection-mode resolved by the management server. UNSPECIFIED = use + // legacy LazyConnectionEnabled fallback. Added in Phase 1 (#5989). + ConnectionMode ConnectionMode `protobuf:"varint,11,opt,name=ConnectionMode,proto3,enum=management.ConnectionMode" json:"ConnectionMode,omitempty"` + // Idle timeout for the ICE worker in seconds. 0 = never tear down. + // Effective in p2p-dynamic mode (added in Phase 2). Sent unconditionally + // for forward-compat. Added in Phase 1 (#5989). + P2PTimeoutSeconds uint32 `protobuf:"varint,12,opt,name=P2pTimeoutSeconds,proto3" json:"P2pTimeoutSeconds,omitempty"` + // Idle timeout for the relay worker in seconds. 0 = never tear down. 
+ // Effective in p2p-lazy and p2p-dynamic modes. Backwards-compat alias for + // NB_LAZY_CONN_INACTIVITY_THRESHOLD. Added in Phase 1 (#5989). + RelayTimeoutSeconds uint32 `protobuf:"varint,13,opt,name=RelayTimeoutSeconds,proto3" json:"RelayTimeoutSeconds,omitempty"` + // P2pRetryMaxSeconds is the maximum interval between P2P retry attempts + // after consecutive ICE failures, in seconds. Effective only in + // p2p-dynamic mode (added in Phase 3 of #5989). + // + // Wire-format semantics: + // + // 0 -> "not set"; daemon uses built-in default (15 min) + // max -> "user explicitly disabled backoff"; daemon disables backoff + // else -> seconds, capped at this value in the cenkalti/backoff schedule + P2PRetryMaxSeconds uint32 `protobuf:"varint,14,opt,name=P2pRetryMaxSeconds,proto3" json:"P2pRetryMaxSeconds,omitempty"` } func (x *PeerConfig) Reset() { @@ -2253,6 +2450,34 @@ func (x *PeerConfig) GetAutoUpdate() *AutoUpdateSettings { return nil } +func (x *PeerConfig) GetConnectionMode() ConnectionMode { + if x != nil { + return x.ConnectionMode + } + return ConnectionMode_CONNECTION_MODE_UNSPECIFIED +} + +func (x *PeerConfig) GetP2PTimeoutSeconds() uint32 { + if x != nil { + return x.P2PTimeoutSeconds + } + return 0 +} + +func (x *PeerConfig) GetRelayTimeoutSeconds() uint32 { + if x != nil { + return x.RelayTimeoutSeconds + } + return 0 +} + +func (x *PeerConfig) GetP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.P2PRetryMaxSeconds + } + return 0 +} + type AutoUpdateSettings struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2597,6 +2822,36 @@ type RemotePeerConfig struct { // Peer fully qualified domain name Fqdn string `protobuf:"bytes,4,opt,name=fqdn,proto3" json:"fqdn,omitempty"` AgentVersion string `protobuf:"bytes,5,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + // Phase 3.7i (#5989): connection mode/timeouts the remote peer is + // actually running with (after env > local-cfg > server-pushed > legacy + // 
resolution), as reported by that peer in its PeerSystemMeta. Empty + // when remote peer pre-dates Phase 3.7i. + EffectiveConnectionMode string `protobuf:"bytes,6,opt,name=effective_connection_mode,json=effectiveConnectionMode,proto3" json:"effective_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `protobuf:"varint,7,opt,name=effective_relay_timeout_secs,json=effectiveRelayTimeoutSecs,proto3" json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `protobuf:"varint,8,opt,name=effective_p2p_timeout_secs,json=effectiveP2pTimeoutSecs,proto3" json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `protobuf:"varint,9,opt,name=effective_p2p_retry_max_secs,json=effectiveP2pRetryMaxSecs,proto3" json:"effective_p2p_retry_max_secs,omitempty"` + // Connection mode/timeouts the management server has configured for + // that peer via dashboard policy/group. UI compares effective vs + // configured to spot local overrides (≠ → ⚠). + ConfiguredConnectionMode string `protobuf:"bytes,10,opt,name=configured_connection_mode,json=configuredConnectionMode,proto3" json:"configured_connection_mode,omitempty"` + ConfiguredRelayTimeoutSecs uint32 `protobuf:"varint,11,opt,name=configured_relay_timeout_secs,json=configuredRelayTimeoutSecs,proto3" json:"configured_relay_timeout_secs,omitempty"` + ConfiguredP2PTimeoutSecs uint32 `protobuf:"varint,12,opt,name=configured_p2p_timeout_secs,json=configuredP2pTimeoutSecs,proto3" json:"configured_p2p_timeout_secs,omitempty"` + ConfiguredP2PRetryMaxSecs uint32 `protobuf:"varint,13,opt,name=configured_p2p_retry_max_secs,json=configuredP2pRetryMaxSecs,proto3" json:"configured_p2p_retry_max_secs,omitempty"` + // Phase 3.7i: server-knowledge fields surfaced to UIs without an + // extra Mgmt API call (already in the NetworkMap stream context). 
+ LastSeenAtServer *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=last_seen_at_server,json=lastSeenAtServer,proto3" json:"last_seen_at_server,omitempty"` + Groups []string `protobuf:"bytes,15,rep,name=groups,proto3" json:"groups,omitempty"` + // Live online flag: peer.Status.Connected on the management server. + // True = peer is currently heartbeating. False = peer hasn't checked + // in (hardware/network down) but its login is still valid (otherwise + // it would be in OfflinePeers, not RemotePeers). + LiveOnline bool `protobuf:"varint,16,opt,name=live_online,json=liveOnline,proto3" json:"live_online,omitempty"` + // Server-knowledge marker: true when the management server is new + // enough to populate live_online authoritatively. Old servers leave + // this field at false (default), and new clients then fall back to + // legacy heuristics (assume online when live_online is false but + // last_seen_at_server is also unset, i.e. nothing is known). + ServerLivenessKnown bool `protobuf:"varint,17,opt,name=server_liveness_known,json=serverLivenessKnown,proto3" json:"server_liveness_known,omitempty"` } func (x *RemotePeerConfig) Reset() { @@ -2666,6 +2921,90 @@ func (x *RemotePeerConfig) GetAgentVersion() string { return "" } +func (x *RemotePeerConfig) GetEffectiveConnectionMode() string { + if x != nil { + return x.EffectiveConnectionMode + } + return "" +} + +func (x *RemotePeerConfig) GetEffectiveRelayTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveRelayTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetEffectiveP2PTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveP2PTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetEffectiveP2PRetryMaxSecs() uint32 { + if x != nil { + return x.EffectiveP2PRetryMaxSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetConfiguredConnectionMode() string { + if x != nil { + return x.ConfiguredConnectionMode + } + return "" +} + +func (x *RemotePeerConfig) 
GetConfiguredRelayTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredRelayTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetConfiguredP2PTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredP2PTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetConfiguredP2PRetryMaxSecs() uint32 { + if x != nil { + return x.ConfiguredP2PRetryMaxSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetLastSeenAtServer() *timestamppb.Timestamp { + if x != nil { + return x.LastSeenAtServer + } + return nil +} + +func (x *RemotePeerConfig) GetGroups() []string { + if x != nil { + return x.Groups + } + return nil +} + +func (x *RemotePeerConfig) GetLiveOnline() bool { + if x != nil { + return x.LiveOnline + } + return false +} + +func (x *RemotePeerConfig) GetServerLivenessKnown() bool { + if x != nil { + return x.ServerLivenessKnown + } + return false +} + // SSHConfig represents SSH configurations of a peer. type SSHConfig struct { state protoimpl.MessageState @@ -4385,6 +4724,244 @@ func (*StopExposeResponse) Descriptor() ([]byte, []int) { return file_management_proto_rawDescGZIP(), []int{52} } +// Phase 3.7i (#5989): per-peer connection-state push payload (encrypted +// body of SyncPeerConnections request). +type PeerConnectionMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Seq uint64 `protobuf:"varint,1,opt,name=seq,proto3" json:"seq,omitempty"` + FullSnapshot bool `protobuf:"varint,2,opt,name=full_snapshot,json=fullSnapshot,proto3" json:"full_snapshot,omitempty"` + Entries []*PeerConnectionEntry `protobuf:"bytes,3,rep,name=entries,proto3" json:"entries,omitempty"` + InResponseToNonce uint64 `protobuf:"varint,4,opt,name=in_response_to_nonce,json=inResponseToNonce,proto3" json:"in_response_to_nonce,omitempty"` + // Phase 3.7i lifecycle hardening (Codex follow-up): random uint64 + // generated once per daemon process. 
Lets mgmt detect a daemon + // restart even if a stale unary RPC from the previous process + // arrives AFTER the new process's full snapshot. Mgmt drops any + // delta whose session_id doesn't match the cached entry's. + // Legacy clients send 0 (Phase 3.7i shipped without this field); + // mgmt falls back to seq-only behaviour for those. + SessionId uint64 `protobuf:"varint,5,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (x *PeerConnectionMap) Reset() { + *x = PeerConnectionMap{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerConnectionMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerConnectionMap) ProtoMessage() {} + +func (x *PeerConnectionMap) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerConnectionMap.ProtoReflect.Descriptor instead. 
+func (*PeerConnectionMap) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{53} +} + +func (x *PeerConnectionMap) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +func (x *PeerConnectionMap) GetFullSnapshot() bool { + if x != nil { + return x.FullSnapshot + } + return false +} + +func (x *PeerConnectionMap) GetEntries() []*PeerConnectionEntry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *PeerConnectionMap) GetInResponseToNonce() uint64 { + if x != nil { + return x.InResponseToNonce + } + return 0 +} + +func (x *PeerConnectionMap) GetSessionId() uint64 { + if x != nil { + return x.SessionId + } + return 0 +} + +type PeerConnectionEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RemotePubkey string `protobuf:"bytes,1,opt,name=remote_pubkey,json=remotePubkey,proto3" json:"remote_pubkey,omitempty"` + ConnType ConnType `protobuf:"varint,2,opt,name=conn_type,json=connType,proto3,enum=management.ConnType" json:"conn_type,omitempty"` + LastHandshake *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_handshake,json=lastHandshake,proto3" json:"last_handshake,omitempty"` + LatencyMs uint32 `protobuf:"varint,4,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"` + Endpoint string `protobuf:"bytes,5,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + RelayServer string `protobuf:"bytes,6,opt,name=relay_server,json=relayServer,proto3" json:"relay_server,omitempty"` + RxBytes uint64 `protobuf:"varint,7,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + TxBytes uint64 `protobuf:"varint,8,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` +} + +func (x *PeerConnectionEntry) Reset() { + *x = PeerConnectionEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + 
+func (x *PeerConnectionEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerConnectionEntry) ProtoMessage() {} + +func (x *PeerConnectionEntry) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerConnectionEntry.ProtoReflect.Descriptor instead. +func (*PeerConnectionEntry) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{54} +} + +func (x *PeerConnectionEntry) GetRemotePubkey() string { + if x != nil { + return x.RemotePubkey + } + return "" +} + +func (x *PeerConnectionEntry) GetConnType() ConnType { + if x != nil { + return x.ConnType + } + return ConnType_CONN_TYPE_UNSPECIFIED +} + +func (x *PeerConnectionEntry) GetLastHandshake() *timestamppb.Timestamp { + if x != nil { + return x.LastHandshake + } + return nil +} + +func (x *PeerConnectionEntry) GetLatencyMs() uint32 { + if x != nil { + return x.LatencyMs + } + return 0 +} + +func (x *PeerConnectionEntry) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + +func (x *PeerConnectionEntry) GetRelayServer() string { + if x != nil { + return x.RelayServer + } + return "" +} + +func (x *PeerConnectionEntry) GetRxBytes() uint64 { + if x != nil { + return x.RxBytes + } + return 0 +} + +func (x *PeerConnectionEntry) GetTxBytes() uint64 { + if x != nil { + return x.TxBytes + } + return 0 +} + +type PeerSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (x *PeerSnapshotRequest) Reset() { + *x = PeerSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[55] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerSnapshotRequest) ProtoMessage() {} + +func (x *PeerSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerSnapshotRequest.ProtoReflect.Descriptor instead. +func (*PeerSnapshotRequest) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{55} +} + +func (x *PeerSnapshotRequest) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + type PortInfo_Range struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4397,7 +4974,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[54] + mi := &file_management_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4410,7 +4987,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[54] + mi := &file_management_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4489,7 +5066,7 @@ var file_management_proto_rawDesc = []byte{ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, - 0x61, 0x52, 
0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xdb, 0x02, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, + 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xa7, 0x03, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, @@ -4511,626 +5088,767 @@ var file_management_proto_rawDesc = []byte{ 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, - 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x74, - 0x75, 0x70, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x74, - 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 
0x65, - 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, - 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x22, 0x44, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, - 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, - 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x77, - 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, - 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, - 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, - 0x12, 0x2a, 0x0a, 0x10, 0x72, 
0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, - 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, - 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, - 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, 0x2a, - 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, - 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x12, 0x28, - 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, - 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 
0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, - 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, - 0x50, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x12, 0x42, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x4a, 0x0a, 0x10, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x52, 0x0f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 
0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, + 0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, + 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, + 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, + 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x44, 0x0a, + 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, + 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, + 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, + 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, + 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, 
0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x10, + 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, + 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, + 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 
0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x12, 0x28, 0x0a, 0x0f, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x22, 0x0a, 0x0c, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, + 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x0d, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, + 0x54, 0x50, 0x12, 
0x42, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, - 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x44, 0x0a, 0x1d, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, - 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x12, 0x26, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, - 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x22, 0xf2, 0x04, 0x0a, 0x0e, 0x50, 0x65, 0x65, - 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x68, - 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, - 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x12, 0x16, 0x0a, 0x06, 0x6b, - 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x65, 0x72, - 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 
0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x4f, 0x53, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, 0x74, - 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x75, - 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x6b, 0x65, 0x72, - 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1c, 0x0a, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x44, 0x0a, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, + 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x0a, 0x0e, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, + 0x41, 0x75, 0x74, 0x68, 0x22, 0x9b, 0x07, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x12, 0x16, 0x0a, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, + 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, + 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x4f, 0x53, 0x12, + 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x69, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x69, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4f, + 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 
0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, - 0x26, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, - 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, - 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, - 0x72, 0x12, 0x39, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, - 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x05, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 
0x22, 0xb4, 0x01, - 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x73, + 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, + 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, + 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x12, 0x39, 0x0a, + 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x76, + 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x12, 0x27, 0x0a, 
0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x32, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x33, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, + 0x73, 0x65, 0x63, 0x73, 0x18, 0x34, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, + 0x65, 0x63, 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, + 0x65, 0x63, 0x73, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, + 0x65, 0x63, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x3c, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 
0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, + 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x38, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, 
0x01, + 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x2c, 0x0a, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, 0x0a, + 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x74, + 0x75, 0x72, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 
0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, - 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, - 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x05, 0x73, 0x74, - 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x05, 0x74, 0x75, 0x72, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x12, - 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, - 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6c, 0x61, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x2a, - 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x98, 0x01, 0x0a, 0x0a, 0x48, - 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x08, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x1f, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x02, - 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x44, - 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 
0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2e, - 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x78, 0x69, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, - 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, - 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, - 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6b, 0x65, - 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, - 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 
0x65, 0x6e, 0x41, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, - 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x68, - 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x02, 0x0a, 0x0a, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, - 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x48, 0x0a, - 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 
0x6c, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, - 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a, - 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6d, 0x74, 0x75, 0x12, - 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, - 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, - 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x6d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, - 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x4e, - 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, - 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, - 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x1a, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, - 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0f, 0x66, 0x6f, 0x72, - 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0f, - 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, - 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 
0x0b, - 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, - 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x22, 0x82, - 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x55, 0x73, - 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x28, 0x0a, 0x0f, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, - 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, - 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, - 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x65, 0x73, 0x18, 
0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x65, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, - 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, - 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, - 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, - 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, - 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x22, 0x0a, - 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, - 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, - 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x09, - 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x57, 0x54, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, 0x77, 0x74, 
0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, - 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, - 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, - 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, - 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x4f, 0x53, - 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x42, - 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 
0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0xbc, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x12, 0x26, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, - 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, - 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, - 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, - 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, - 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, - 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, - 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, - 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, - 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 
0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, - 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, - 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, - 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, - 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x65, 
0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, - 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, - 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 
0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, - 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, 0x65, + 0x6c, 0x61, 0x79, 0x12, 0x2a, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, + 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x22, + 0x98, 0x01, 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, + 0x12, 0x3b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, 0x0a, + 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, + 0x54, 0x54, 0x50, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x03, + 0x12, 0x08, 0x0a, 0x04, 0x44, 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, 0x65, + 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, 
0x0a, + 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, 0x6c, + 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, + 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x65, 0x78, 0x69, 0x74, 
0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, + 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, + 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6b, + 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, + 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, + 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 
0x6f, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xb3, + 0x04, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, + 0x64, 0x6e, 0x12, 0x48, 0x0a, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, + 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, + 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, + 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x6d, 0x74, 0x75, 0x12, 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 
0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x32, 0x70, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x11, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x13, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x32, 0x70, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, + 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, + 0x08, 0x0a, 0x10, 0x0b, 0x22, 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, + 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, + 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, + 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 
0x66, 0x6c, 0x69, + 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, + 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, + 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, + 0x0a, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, + 0x0a, 0x0f, 0x66, 
0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, + 0x75, 0x74, 0x68, 0x22, 0x82, 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, + 0x20, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, + 0x6d, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, + 0x73, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, + 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, + 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, + 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, + 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, + 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, + 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x22, 0xef, 0x06, 0x0a, 0x10, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, + 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, + 0x64, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x65, 0x66, 0x66, 0x65, 
0x63, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, + 0x63, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, + 0x65, 0x63, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, + 0x12, 0x3e, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x32, + 0x70, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x73, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x73, + 0x12, 0x3c, 0x0a, 0x1a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x41, + 0x0a, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6c, + 0x61, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x64, 0x52, 0x65, 0x6c, 0x61, 
0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, + 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x5f, + 0x70, 0x32, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x64, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, + 0x12, 0x40, 0x0a, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x5f, 0x70, + 0x32, 0x70, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, + 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x64, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, 0x65, + 0x63, 0x73, 0x12, 0x49, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x5f, + 0x61, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x6c, 0x61, 0x73, + 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x0a, + 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, + 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x6c, 0x69, 0x76, 0x65, + 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x76, + 0x65, 0x6e, 0x65, 0x73, 0x73, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x22, 
0x7e, 0x0a, 0x09, 0x53, 0x53, + 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, + 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, + 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, + 0x17, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x4f, 0x53, 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, + 0x0a, 0x1c, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, + 0x0a, 0x15, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbc, 0x03, 0x0a, 0x0e, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, + 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0c, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, + 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, + 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 
0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, + 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, + 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 
0x6b, 0x12, 0x20, + 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, + 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, + 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, + 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, + 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, + 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x53, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, + 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, - 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, - 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, - 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, - 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, - 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, - 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, - 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, - 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, + 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, + 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, + 
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, + 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, + 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, + 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, + 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, + 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, + 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, + 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, + 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, 0x6f, 0x72, 0x74, 
0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, + 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, + 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, + 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, + 0x72, 0x63, 
0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, - 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, - 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, - 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 
0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, - 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, - 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 
0x6e, - 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, - 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, - 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, - 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, - 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, - 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, - 0x73, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 
0x0a, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, - 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, - 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, - 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, - 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, - 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, - 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, - 0x53, 
0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, - 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, - 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, - 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, - 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, - 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, - 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, - 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, - 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, - 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, - 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, - 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, - 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, - 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, - 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, - 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 
0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, - 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, - 0x44, 0x50, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, - 0x4c, 0x53, 0x10, 0x04, 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, - 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, + 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, + 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 
0x34, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, + 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, + 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x8b, + 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 
0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x01, 0x0a, + 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, + 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, 
0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x15, + 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, + 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x11, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x70, 0x12, 0x10, + 0x0a, 0x03, 0x73, 0x65, 0x71, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x65, 0x71, + 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x39, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x12, 0x2f, 0x0a, 0x14, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x74, 0x6f, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x6f, 0x4e, 0x6f, 0x6e, 
0x63, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x22, 0xc4, 0x02, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x31, 0x0a, + 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, + 0x6e, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x41, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4d, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 
0x72, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, + 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x74, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, + 0x6f, 0x6e, 0x63, 0x65, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, + 0x65, 0x64, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, + 0x2a, 0xab, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x52, 0x45, 0x4c, 0x41, 0x59, 0x5f, 0x46, 0x4f, + 0x52, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x10, 0x02, 0x12, + 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, + 0x44, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x5f, 0x4c, 0x41, 0x5a, 0x59, 0x10, 0x03, 0x12, 0x1f, 0x0a, + 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, + 0x5f, 0x50, 0x32, 0x50, 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x04, 0x2a, 0x4c, + 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 
0x63, 0x6f, 0x6c, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, + 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, + 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, + 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, + 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, + 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, + 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, + 0x10, 0x01, 0x2a, 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, + 0x54, 0x54, 0x50, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, + 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, + 0x45, 0x5f, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, + 0x45, 0x5f, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, + 0x45, 0x5f, 0x54, 0x4c, 0x53, 0x10, 0x04, 0x2a, 0x7d, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, + 0x0a, 0x0e, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x44, 0x4c, 0x45, + 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, + 0x43, 
0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x10, 0x03, 0x12, + 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4c, + 0x41, 0x59, 0x45, 0x44, 0x10, 0x04, 0x32, 0xc7, 0x07, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, + 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, - 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, - 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 
0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, - 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, - 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, - 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, - 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 
0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, + 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, - 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, - 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 
0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, + 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, + 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, + 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, + 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x13, 0x53, 0x79, 0x6e, + 0x63, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 
0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, + 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0c, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 
0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, + 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, + 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -5145,166 +5863,179 @@ func file_management_proto_rawDescGZIP() []byte { return file_management_proto_rawDescData } -var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 55) +var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 9) +var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 58) var file_management_proto_goTypes = []interface{}{ (JobStatus)(0), // 0: management.JobStatus - (RuleProtocol)(0), // 1: management.RuleProtocol - (RuleDirection)(0), // 2: management.RuleDirection - (RuleAction)(0), // 3: management.RuleAction - (ExposeProtocol)(0), // 4: management.ExposeProtocol - (HostConfig_Protocol)(0), // 5: management.HostConfig.Protocol - (DeviceAuthorizationFlowProvider)(0), // 6: management.DeviceAuthorizationFlow.provider - (*EncryptedMessage)(nil), // 7: management.EncryptedMessage - (*JobRequest)(nil), // 8: management.JobRequest - (*JobResponse)(nil), // 9: management.JobResponse - (*BundleParameters)(nil), // 10: management.BundleParameters - (*BundleResult)(nil), // 11: management.BundleResult - (*SyncRequest)(nil), // 12: management.SyncRequest - (*SyncResponse)(nil), // 13: management.SyncResponse - (*SyncMetaRequest)(nil), 
// 14: management.SyncMetaRequest - (*LoginRequest)(nil), // 15: management.LoginRequest - (*PeerKeys)(nil), // 16: management.PeerKeys - (*Environment)(nil), // 17: management.Environment - (*File)(nil), // 18: management.File - (*Flags)(nil), // 19: management.Flags - (*PeerSystemMeta)(nil), // 20: management.PeerSystemMeta - (*LoginResponse)(nil), // 21: management.LoginResponse - (*ServerKeyResponse)(nil), // 22: management.ServerKeyResponse - (*Empty)(nil), // 23: management.Empty - (*NetbirdConfig)(nil), // 24: management.NetbirdConfig - (*HostConfig)(nil), // 25: management.HostConfig - (*RelayConfig)(nil), // 26: management.RelayConfig - (*FlowConfig)(nil), // 27: management.FlowConfig - (*JWTConfig)(nil), // 28: management.JWTConfig - (*ProtectedHostConfig)(nil), // 29: management.ProtectedHostConfig - (*PeerConfig)(nil), // 30: management.PeerConfig - (*AutoUpdateSettings)(nil), // 31: management.AutoUpdateSettings - (*NetworkMap)(nil), // 32: management.NetworkMap - (*SSHAuth)(nil), // 33: management.SSHAuth - (*MachineUserIndexes)(nil), // 34: management.MachineUserIndexes - (*RemotePeerConfig)(nil), // 35: management.RemotePeerConfig - (*SSHConfig)(nil), // 36: management.SSHConfig - (*DeviceAuthorizationFlowRequest)(nil), // 37: management.DeviceAuthorizationFlowRequest - (*DeviceAuthorizationFlow)(nil), // 38: management.DeviceAuthorizationFlow - (*PKCEAuthorizationFlowRequest)(nil), // 39: management.PKCEAuthorizationFlowRequest - (*PKCEAuthorizationFlow)(nil), // 40: management.PKCEAuthorizationFlow - (*ProviderConfig)(nil), // 41: management.ProviderConfig - (*Route)(nil), // 42: management.Route - (*DNSConfig)(nil), // 43: management.DNSConfig - (*CustomZone)(nil), // 44: management.CustomZone - (*SimpleRecord)(nil), // 45: management.SimpleRecord - (*NameServerGroup)(nil), // 46: management.NameServerGroup - (*NameServer)(nil), // 47: management.NameServer - (*FirewallRule)(nil), // 48: management.FirewallRule - (*NetworkAddress)(nil), // 49: 
management.NetworkAddress - (*Checks)(nil), // 50: management.Checks - (*PortInfo)(nil), // 51: management.PortInfo - (*RouteFirewallRule)(nil), // 52: management.RouteFirewallRule - (*ForwardingRule)(nil), // 53: management.ForwardingRule - (*ExposeServiceRequest)(nil), // 54: management.ExposeServiceRequest - (*ExposeServiceResponse)(nil), // 55: management.ExposeServiceResponse - (*RenewExposeRequest)(nil), // 56: management.RenewExposeRequest - (*RenewExposeResponse)(nil), // 57: management.RenewExposeResponse - (*StopExposeRequest)(nil), // 58: management.StopExposeRequest - (*StopExposeResponse)(nil), // 59: management.StopExposeResponse - nil, // 60: management.SSHAuth.MachineUsersEntry - (*PortInfo_Range)(nil), // 61: management.PortInfo.Range - (*timestamppb.Timestamp)(nil), // 62: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 63: google.protobuf.Duration + (ConnectionMode)(0), // 1: management.ConnectionMode + (RuleProtocol)(0), // 2: management.RuleProtocol + (RuleDirection)(0), // 3: management.RuleDirection + (RuleAction)(0), // 4: management.RuleAction + (ExposeProtocol)(0), // 5: management.ExposeProtocol + (ConnType)(0), // 6: management.ConnType + (HostConfig_Protocol)(0), // 7: management.HostConfig.Protocol + (DeviceAuthorizationFlowProvider)(0), // 8: management.DeviceAuthorizationFlow.provider + (*EncryptedMessage)(nil), // 9: management.EncryptedMessage + (*JobRequest)(nil), // 10: management.JobRequest + (*JobResponse)(nil), // 11: management.JobResponse + (*BundleParameters)(nil), // 12: management.BundleParameters + (*BundleResult)(nil), // 13: management.BundleResult + (*SyncRequest)(nil), // 14: management.SyncRequest + (*SyncResponse)(nil), // 15: management.SyncResponse + (*SyncMetaRequest)(nil), // 16: management.SyncMetaRequest + (*LoginRequest)(nil), // 17: management.LoginRequest + (*PeerKeys)(nil), // 18: management.PeerKeys + (*Environment)(nil), // 19: management.Environment + (*File)(nil), // 20: management.File + 
(*Flags)(nil), // 21: management.Flags + (*PeerSystemMeta)(nil), // 22: management.PeerSystemMeta + (*LoginResponse)(nil), // 23: management.LoginResponse + (*ServerKeyResponse)(nil), // 24: management.ServerKeyResponse + (*Empty)(nil), // 25: management.Empty + (*NetbirdConfig)(nil), // 26: management.NetbirdConfig + (*HostConfig)(nil), // 27: management.HostConfig + (*RelayConfig)(nil), // 28: management.RelayConfig + (*FlowConfig)(nil), // 29: management.FlowConfig + (*JWTConfig)(nil), // 30: management.JWTConfig + (*ProtectedHostConfig)(nil), // 31: management.ProtectedHostConfig + (*PeerConfig)(nil), // 32: management.PeerConfig + (*AutoUpdateSettings)(nil), // 33: management.AutoUpdateSettings + (*NetworkMap)(nil), // 34: management.NetworkMap + (*SSHAuth)(nil), // 35: management.SSHAuth + (*MachineUserIndexes)(nil), // 36: management.MachineUserIndexes + (*RemotePeerConfig)(nil), // 37: management.RemotePeerConfig + (*SSHConfig)(nil), // 38: management.SSHConfig + (*DeviceAuthorizationFlowRequest)(nil), // 39: management.DeviceAuthorizationFlowRequest + (*DeviceAuthorizationFlow)(nil), // 40: management.DeviceAuthorizationFlow + (*PKCEAuthorizationFlowRequest)(nil), // 41: management.PKCEAuthorizationFlowRequest + (*PKCEAuthorizationFlow)(nil), // 42: management.PKCEAuthorizationFlow + (*ProviderConfig)(nil), // 43: management.ProviderConfig + (*Route)(nil), // 44: management.Route + (*DNSConfig)(nil), // 45: management.DNSConfig + (*CustomZone)(nil), // 46: management.CustomZone + (*SimpleRecord)(nil), // 47: management.SimpleRecord + (*NameServerGroup)(nil), // 48: management.NameServerGroup + (*NameServer)(nil), // 49: management.NameServer + (*FirewallRule)(nil), // 50: management.FirewallRule + (*NetworkAddress)(nil), // 51: management.NetworkAddress + (*Checks)(nil), // 52: management.Checks + (*PortInfo)(nil), // 53: management.PortInfo + (*RouteFirewallRule)(nil), // 54: management.RouteFirewallRule + (*ForwardingRule)(nil), // 55: 
management.ForwardingRule + (*ExposeServiceRequest)(nil), // 56: management.ExposeServiceRequest + (*ExposeServiceResponse)(nil), // 57: management.ExposeServiceResponse + (*RenewExposeRequest)(nil), // 58: management.RenewExposeRequest + (*RenewExposeResponse)(nil), // 59: management.RenewExposeResponse + (*StopExposeRequest)(nil), // 60: management.StopExposeRequest + (*StopExposeResponse)(nil), // 61: management.StopExposeResponse + (*PeerConnectionMap)(nil), // 62: management.PeerConnectionMap + (*PeerConnectionEntry)(nil), // 63: management.PeerConnectionEntry + (*PeerSnapshotRequest)(nil), // 64: management.PeerSnapshotRequest + nil, // 65: management.SSHAuth.MachineUsersEntry + (*PortInfo_Range)(nil), // 66: management.PortInfo.Range + (*timestamppb.Timestamp)(nil), // 67: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 68: google.protobuf.Duration } var file_management_proto_depIdxs = []int32{ - 10, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters + 12, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters 0, // 1: management.JobResponse.status:type_name -> management.JobStatus - 11, // 2: management.JobResponse.bundle:type_name -> management.BundleResult - 20, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta - 24, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig - 30, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig - 35, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig - 32, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap - 50, // 8: management.SyncResponse.Checks:type_name -> management.Checks - 20, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta - 20, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta - 16, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys - 49, // 12: 
management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress - 17, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment - 18, // 14: management.PeerSystemMeta.files:type_name -> management.File - 19, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags - 24, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig - 30, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig - 50, // 18: management.LoginResponse.Checks:type_name -> management.Checks - 62, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp - 25, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig - 29, // 21: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig - 25, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig - 26, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig - 27, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig - 5, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol - 63, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration - 25, // 27: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig - 36, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig - 31, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings - 30, // 30: management.NetworkMap.peerConfig:type_name -> management.PeerConfig - 35, // 31: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig - 42, // 32: management.NetworkMap.Routes:type_name -> management.Route - 43, // 33: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig - 35, // 34: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig - 48, // 35: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule - 52, // 36: 
management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule - 53, // 37: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule - 33, // 38: management.NetworkMap.sshAuth:type_name -> management.SSHAuth - 60, // 39: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry - 36, // 40: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig - 28, // 41: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig - 6, // 42: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider - 41, // 43: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 41, // 44: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 46, // 45: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup - 44, // 46: management.DNSConfig.CustomZones:type_name -> management.CustomZone - 45, // 47: management.CustomZone.Records:type_name -> management.SimpleRecord - 47, // 48: management.NameServerGroup.NameServers:type_name -> management.NameServer - 2, // 49: management.FirewallRule.Direction:type_name -> management.RuleDirection - 3, // 50: management.FirewallRule.Action:type_name -> management.RuleAction - 1, // 51: management.FirewallRule.Protocol:type_name -> management.RuleProtocol - 51, // 52: management.FirewallRule.PortInfo:type_name -> management.PortInfo - 61, // 53: management.PortInfo.range:type_name -> management.PortInfo.Range - 3, // 54: management.RouteFirewallRule.action:type_name -> management.RuleAction - 1, // 55: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol - 51, // 56: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo - 1, // 57: management.ForwardingRule.protocol:type_name -> management.RuleProtocol - 51, // 58: management.ForwardingRule.destinationPort:type_name -> management.PortInfo - 51, // 
59: management.ForwardingRule.translatedPort:type_name -> management.PortInfo - 4, // 60: management.ExposeServiceRequest.protocol:type_name -> management.ExposeProtocol - 34, // 61: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes - 7, // 62: management.ManagementService.Login:input_type -> management.EncryptedMessage - 7, // 63: management.ManagementService.Sync:input_type -> management.EncryptedMessage - 23, // 64: management.ManagementService.GetServerKey:input_type -> management.Empty - 23, // 65: management.ManagementService.isHealthy:input_type -> management.Empty - 7, // 66: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage - 7, // 67: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage - 7, // 68: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage - 7, // 69: management.ManagementService.Logout:input_type -> management.EncryptedMessage - 7, // 70: management.ManagementService.Job:input_type -> management.EncryptedMessage - 7, // 71: management.ManagementService.CreateExpose:input_type -> management.EncryptedMessage - 7, // 72: management.ManagementService.RenewExpose:input_type -> management.EncryptedMessage - 7, // 73: management.ManagementService.StopExpose:input_type -> management.EncryptedMessage - 7, // 74: management.ManagementService.Login:output_type -> management.EncryptedMessage - 7, // 75: management.ManagementService.Sync:output_type -> management.EncryptedMessage - 22, // 76: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse - 23, // 77: management.ManagementService.isHealthy:output_type -> management.Empty - 7, // 78: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage - 7, // 79: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage - 23, // 80: 
management.ManagementService.SyncMeta:output_type -> management.Empty - 23, // 81: management.ManagementService.Logout:output_type -> management.Empty - 7, // 82: management.ManagementService.Job:output_type -> management.EncryptedMessage - 7, // 83: management.ManagementService.CreateExpose:output_type -> management.EncryptedMessage - 7, // 84: management.ManagementService.RenewExpose:output_type -> management.EncryptedMessage - 7, // 85: management.ManagementService.StopExpose:output_type -> management.EncryptedMessage - 74, // [74:86] is the sub-list for method output_type - 62, // [62:74] is the sub-list for method input_type - 62, // [62:62] is the sub-list for extension type_name - 62, // [62:62] is the sub-list for extension extendee - 0, // [0:62] is the sub-list for field type_name + 13, // 2: management.JobResponse.bundle:type_name -> management.BundleResult + 22, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta + 26, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig + 32, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig + 37, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig + 34, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap + 52, // 8: management.SyncResponse.Checks:type_name -> management.Checks + 64, // 9: management.SyncResponse.snapshot_request:type_name -> management.PeerSnapshotRequest + 22, // 10: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta + 22, // 11: management.LoginRequest.meta:type_name -> management.PeerSystemMeta + 18, // 12: management.LoginRequest.peerKeys:type_name -> management.PeerKeys + 51, // 13: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress + 19, // 14: management.PeerSystemMeta.environment:type_name -> management.Environment + 20, // 15: management.PeerSystemMeta.files:type_name -> management.File + 21, // 16: 
management.PeerSystemMeta.flags:type_name -> management.Flags + 26, // 17: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig + 32, // 18: management.LoginResponse.peerConfig:type_name -> management.PeerConfig + 52, // 19: management.LoginResponse.Checks:type_name -> management.Checks + 67, // 20: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp + 27, // 21: management.NetbirdConfig.stuns:type_name -> management.HostConfig + 31, // 22: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig + 27, // 23: management.NetbirdConfig.signal:type_name -> management.HostConfig + 28, // 24: management.NetbirdConfig.relay:type_name -> management.RelayConfig + 29, // 25: management.NetbirdConfig.flow:type_name -> management.FlowConfig + 7, // 26: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol + 68, // 27: management.FlowConfig.interval:type_name -> google.protobuf.Duration + 27, // 28: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig + 38, // 29: management.PeerConfig.sshConfig:type_name -> management.SSHConfig + 33, // 30: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings + 1, // 31: management.PeerConfig.ConnectionMode:type_name -> management.ConnectionMode + 32, // 32: management.NetworkMap.peerConfig:type_name -> management.PeerConfig + 37, // 33: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig + 44, // 34: management.NetworkMap.Routes:type_name -> management.Route + 45, // 35: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig + 37, // 36: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig + 50, // 37: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule + 54, // 38: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule + 55, // 39: management.NetworkMap.forwardingRules:type_name -> 
management.ForwardingRule + 35, // 40: management.NetworkMap.sshAuth:type_name -> management.SSHAuth + 65, // 41: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry + 38, // 42: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig + 67, // 43: management.RemotePeerConfig.last_seen_at_server:type_name -> google.protobuf.Timestamp + 30, // 44: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig + 8, // 45: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider + 43, // 46: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 43, // 47: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 48, // 48: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup + 46, // 49: management.DNSConfig.CustomZones:type_name -> management.CustomZone + 47, // 50: management.CustomZone.Records:type_name -> management.SimpleRecord + 49, // 51: management.NameServerGroup.NameServers:type_name -> management.NameServer + 3, // 52: management.FirewallRule.Direction:type_name -> management.RuleDirection + 4, // 53: management.FirewallRule.Action:type_name -> management.RuleAction + 2, // 54: management.FirewallRule.Protocol:type_name -> management.RuleProtocol + 53, // 55: management.FirewallRule.PortInfo:type_name -> management.PortInfo + 66, // 56: management.PortInfo.range:type_name -> management.PortInfo.Range + 4, // 57: management.RouteFirewallRule.action:type_name -> management.RuleAction + 2, // 58: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol + 53, // 59: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo + 2, // 60: management.ForwardingRule.protocol:type_name -> management.RuleProtocol + 53, // 61: management.ForwardingRule.destinationPort:type_name -> management.PortInfo + 53, // 62: 
management.ForwardingRule.translatedPort:type_name -> management.PortInfo + 5, // 63: management.ExposeServiceRequest.protocol:type_name -> management.ExposeProtocol + 63, // 64: management.PeerConnectionMap.entries:type_name -> management.PeerConnectionEntry + 6, // 65: management.PeerConnectionEntry.conn_type:type_name -> management.ConnType + 67, // 66: management.PeerConnectionEntry.last_handshake:type_name -> google.protobuf.Timestamp + 36, // 67: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes + 9, // 68: management.ManagementService.Login:input_type -> management.EncryptedMessage + 9, // 69: management.ManagementService.Sync:input_type -> management.EncryptedMessage + 25, // 70: management.ManagementService.GetServerKey:input_type -> management.Empty + 25, // 71: management.ManagementService.isHealthy:input_type -> management.Empty + 9, // 72: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage + 9, // 73: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage + 9, // 74: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage + 9, // 75: management.ManagementService.SyncPeerConnections:input_type -> management.EncryptedMessage + 9, // 76: management.ManagementService.Logout:input_type -> management.EncryptedMessage + 9, // 77: management.ManagementService.Job:input_type -> management.EncryptedMessage + 9, // 78: management.ManagementService.CreateExpose:input_type -> management.EncryptedMessage + 9, // 79: management.ManagementService.RenewExpose:input_type -> management.EncryptedMessage + 9, // 80: management.ManagementService.StopExpose:input_type -> management.EncryptedMessage + 9, // 81: management.ManagementService.Login:output_type -> management.EncryptedMessage + 9, // 82: management.ManagementService.Sync:output_type -> management.EncryptedMessage + 24, // 83: 
management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse + 25, // 84: management.ManagementService.isHealthy:output_type -> management.Empty + 9, // 85: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage + 9, // 86: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage + 25, // 87: management.ManagementService.SyncMeta:output_type -> management.Empty + 25, // 88: management.ManagementService.SyncPeerConnections:output_type -> management.Empty + 25, // 89: management.ManagementService.Logout:output_type -> management.Empty + 9, // 90: management.ManagementService.Job:output_type -> management.EncryptedMessage + 9, // 91: management.ManagementService.CreateExpose:output_type -> management.EncryptedMessage + 9, // 92: management.ManagementService.RenewExpose:output_type -> management.EncryptedMessage + 9, // 93: management.ManagementService.StopExpose:output_type -> management.EncryptedMessage + 81, // [81:94] is the sub-list for method output_type + 68, // [68:81] is the sub-list for method input_type + 68, // [68:68] is the sub-list for extension type_name + 68, // [68:68] is the sub-list for extension extendee + 0, // [0:68] is the sub-list for field type_name } func init() { file_management_proto_init() } @@ -5949,7 +6680,43 @@ func file_management_proto_init() { return nil } } + file_management_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerConnectionMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } file_management_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerConnectionEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[55].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*PeerSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PortInfo_Range); i { case 0: return &v.state @@ -5977,8 +6744,8 @@ func file_management_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_management_proto_rawDesc, - NumEnums: 7, - NumMessages: 55, + NumEnums: 9, + NumMessages: 58, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index 70a53067974..777d9cb189e 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -46,6 +46,12 @@ service ManagementService { // EncryptedMessage of the request has a body of Empty. rpc SyncMeta(EncryptedMessage) returns (Empty) {} + // Phase 3.7i (#5989): per-peer connection-state push from peer to + // mgmt. Body decrypts to PeerConnectionMap. Unary because the + // existing Sync is server-streaming (client cannot inject extra + // frames). + rpc SyncPeerConnections(EncryptedMessage) returns (Empty) {} + // Logout logs out the peer and removes it from the management server rpc Logout(EncryptedMessage) returns (Empty) {} @@ -133,6 +139,11 @@ message SyncResponse { // Posture checks to be evaluated by client repeated Checks Checks = 6; + + // Phase 3.7i (#5989): on-demand refresh request for the peer's + // connection map. Peer responds via SyncPeerConnections RPC with + // in_response_to_nonce echoing this nonce. 
+ PeerSnapshotRequest snapshot_request = 7; } message SyncMetaRequest { @@ -221,6 +232,23 @@ message PeerSystemMeta { Environment environment = 15; repeated File files = 16; Flags flags = 17; + + // Phase 3.7i (#5989): connection mode/timeouts this peer is actually + // running with. Mgmt copies into RemotePeerConfig of every other peer. + string effective_connection_mode = 50; + uint32 effective_relay_timeout_secs = 51; + uint32 effective_p2p_timeout_secs = 52; + uint32 effective_p2p_retry_max_secs = 53; + + // Phase 3.7i (#5989): list of capability keywords this client build + // supports. Old clients leave this empty (proto3 default for repeated + // fields). The management server uses this list to decide whether to + // fall back to legacy settings for clients that do not yet implement a + // feature - e.g. when ConnectionMode is p2p-dynamic and the client does + // not advertise "p2p_dynamic", mgmt downgrades it to p2p-lazy with the + // admin-configured legacy timeout. See client/system/features.go for + // the source of truth on which keywords this client build advertises. + repeated string supported_features = 60; } message LoginResponse { @@ -335,6 +363,48 @@ message PeerConfig { // Auto-update config AutoUpdateSettings autoUpdate = 8; + + // Tags 9 and 10 are intentionally left unused so that future small + // additions can land without re-numbering the new connection-mode + // fields. Reserved here to make the gap explicit for any reviewer. + reserved 9, 10; + + // Connection-mode resolved by the management server. UNSPECIFIED = use + // legacy LazyConnectionEnabled fallback. Added in Phase 1 (#5989). + ConnectionMode ConnectionMode = 11; + + // Idle timeout for the ICE worker in seconds. 0 = never tear down. + // Effective in p2p-dynamic mode (added in Phase 2). Sent unconditionally + // for forward-compat. Added in Phase 1 (#5989). + uint32 P2pTimeoutSeconds = 12; + + // Idle timeout for the relay worker in seconds. 0 = never tear down. 
+ // Effective in p2p-lazy and p2p-dynamic modes. Backwards-compat alias for + // NB_LAZY_CONN_INACTIVITY_THRESHOLD. Added in Phase 1 (#5989). + uint32 RelayTimeoutSeconds = 13; + + // P2pRetryMaxSeconds is the maximum interval between P2P retry attempts + // after consecutive ICE failures, in seconds. Effective only in + // p2p-dynamic mode (added in Phase 3 of #5989). + // + // Wire-format semantics: + // 0 -> "not set"; daemon uses built-in default (15 min) + // max -> "user explicitly disabled backoff"; daemon disables backoff + // else -> seconds, capped at this value in the cenkalti/backoff schedule + uint32 P2pRetryMaxSeconds = 14; +} + +// ConnectionMode controls how a peer establishes connections to other peers. +// Added in Phase 1 of the connection-mode consolidation (see issue #5989). +// CONNECTION_MODE_UNSPECIFIED is the proto default and means "fall back to +// the legacy LazyConnectionEnabled boolean field" -- required for backwards +// compatibility with old management servers that don't set this field. +enum ConnectionMode { + CONNECTION_MODE_UNSPECIFIED = 0; + CONNECTION_MODE_RELAY_FORCED = 1; + CONNECTION_MODE_P2P = 2; + CONNECTION_MODE_P2P_LAZY = 3; + CONNECTION_MODE_P2P_DYNAMIC = 4; } message AutoUpdateSettings { @@ -421,6 +491,40 @@ message RemotePeerConfig { string fqdn = 4; string agentVersion = 5; + + // Phase 3.7i (#5989): connection mode/timeouts the remote peer is + // actually running with (after env > local-cfg > server-pushed > legacy + // resolution), as reported by that peer in its PeerSystemMeta. Empty + // when remote peer pre-dates Phase 3.7i. + string effective_connection_mode = 6; + uint32 effective_relay_timeout_secs = 7; + uint32 effective_p2p_timeout_secs = 8; + uint32 effective_p2p_retry_max_secs = 9; + + // Connection mode/timeouts the management server has configured for + // that peer via dashboard policy/group. UI compares effective vs + // configured to spot local overrides (≠ → ⚠). 
+ string configured_connection_mode = 10; + uint32 configured_relay_timeout_secs = 11; + uint32 configured_p2p_timeout_secs = 12; + uint32 configured_p2p_retry_max_secs = 13; + + // Phase 3.7i: server-knowledge fields surfaced to UIs without an + // extra Mgmt API call (already in the NetworkMap stream context). + google.protobuf.Timestamp last_seen_at_server = 14; + repeated string groups = 15; + // Live online flag: peer.Status.Connected on the management server. + // True = peer is currently heartbeating. False = peer hasn't checked + // in (hardware/network down) but its login is still valid (otherwise + // it would be in OfflinePeers, not RemotePeers). + bool live_online = 16; + + // Server-knowledge marker: true when the management server is new + // enough to populate live_online authoritatively. Old servers leave + // this field at false (default), and new clients then fall back to + // legacy heuristics (assume online when live_online is false but + // last_seen_at_server is also unset, i.e. nothing is known). + bool server_liveness_known = 17; } // SSHConfig represents SSH configurations of a peer. @@ -684,3 +788,43 @@ message StopExposeRequest { } message StopExposeResponse {} + +// Phase 3.7i (#5989): per-peer connection-state push payload (encrypted +// body of SyncPeerConnections request). +message PeerConnectionMap { + uint64 seq = 1; + bool full_snapshot = 2; + repeated PeerConnectionEntry entries = 3; + uint64 in_response_to_nonce = 4; + // Phase 3.7i lifecycle hardening (Codex follow-up): random uint64 + // generated once per daemon process. Lets mgmt detect a daemon + // restart even if a stale unary RPC from the previous process + // arrives AFTER the new process's full snapshot. Mgmt drops any + // delta whose session_id doesn't match the cached entry's. + // Legacy clients send 0 (Phase 3.7i shipped without this field); + // mgmt falls back to seq-only behaviour for those. 
+ uint64 session_id = 5; +} + +message PeerConnectionEntry { + string remote_pubkey = 1; + ConnType conn_type = 2; + google.protobuf.Timestamp last_handshake = 3; + uint32 latency_ms = 4; + string endpoint = 5; + string relay_server = 6; + uint64 rx_bytes = 7; + uint64 tx_bytes = 8; +} + +enum ConnType { + CONN_TYPE_UNSPECIFIED = 0; + CONN_TYPE_IDLE = 1; + CONN_TYPE_CONNECTING = 2; + CONN_TYPE_P2P = 3; + CONN_TYPE_RELAYED = 4; +} + +message PeerSnapshotRequest { + uint64 nonce = 1; +} diff --git a/shared/management/proto/management_grpc.pb.go b/shared/management/proto/management_grpc.pb.go index 39a34204115..e6dd35bf1bb 100644 --- a/shared/management/proto/management_grpc.pb.go +++ b/shared/management/proto/management_grpc.pb.go @@ -48,6 +48,11 @@ type ManagementServiceClient interface { // sync meta will evaluate the checks and update the peer meta with the result. // EncryptedMessage of the request has a body of Empty. SyncMeta(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) + // Phase 3.7i (#5989): per-peer connection-state push from peer to + // mgmt. Body decrypts to PeerConnectionMap. Unary because the + // existing Sync is server-streaming (client cannot inject extra + // frames). + SyncPeerConnections(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) // Executes a job on a target peer (e.g., debug bundle) @@ -154,6 +159,15 @@ func (c *managementServiceClient) SyncMeta(ctx context.Context, in *EncryptedMes return out, nil } +func (c *managementServiceClient) SyncPeerConnections(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/management.ManagementService/SyncPeerConnections", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *managementServiceClient) Logout(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/management.ManagementService/Logout", in, out, opts...) @@ -255,6 +269,11 @@ type ManagementServiceServer interface { // sync meta will evaluate the checks and update the peer meta with the result. // EncryptedMessage of the request has a body of Empty. SyncMeta(context.Context, *EncryptedMessage) (*Empty, error) + // Phase 3.7i (#5989): per-peer connection-state push from peer to + // mgmt. Body decrypts to PeerConnectionMap. Unary because the + // existing Sync is server-streaming (client cannot inject extra + // frames). + SyncPeerConnections(context.Context, *EncryptedMessage) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(context.Context, *EncryptedMessage) (*Empty, error) // Executes a job on a target peer (e.g., debug bundle) @@ -293,6 +312,9 @@ func (UnimplementedManagementServiceServer) GetPKCEAuthorizationFlow(context.Con func (UnimplementedManagementServiceServer) SyncMeta(context.Context, *EncryptedMessage) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SyncMeta not implemented") } +func (UnimplementedManagementServiceServer) SyncPeerConnections(context.Context, *EncryptedMessage) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SyncPeerConnections not implemented") +} func (UnimplementedManagementServiceServer) Logout(context.Context, *EncryptedMessage) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") } @@ -450,6 +472,24 @@ func _ManagementService_SyncMeta_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _ManagementService_SyncPeerConnections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptedMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServiceServer).SyncPeerConnections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ManagementService/SyncPeerConnections", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServiceServer).SyncPeerConnections(ctx, req.(*EncryptedMessage)) + } + return interceptor(ctx, in, info, handler) +} + func _ManagementService_Logout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(EncryptedMessage) if err := dec(in); err != nil { @@ -579,6 +619,10 @@ var ManagementService_ServiceDesc = grpc.ServiceDesc{ MethodName: "SyncMeta", Handler: _ManagementService_SyncMeta_Handler, }, + { + MethodName: "SyncPeerConnections", + Handler: _ManagementService_SyncPeerConnections_Handler, + }, { MethodName: "Logout", Handler: _ManagementService_Logout_Handler,