diff --git a/client/android/client.go b/client/android/client.go index 37e17a36319..17a383c9778 100644 --- a/client/android/client.go +++ b/client/android/client.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "slices" + "strings" "sync" "time" @@ -295,17 +296,50 @@ func (c *Client) SetInfoLogLevel() { // PeersList return with the list of the PeerInfos func (c *Client) PeersList() *PeerInfoArray { + // Refresh WireGuard counters (BytesRx/Tx + LastWireguardHandshake) + // from the kernel/uapi interface before snapshotting. Without this + // the Android UI sees the stale values that were last written when + // the peer was opened/closed (typically 0), because the desktop + // CLI's Status RPC is what normally drives RefreshWireGuardStats. + // Phase 3.7i. + if err := c.recorder.RefreshWireGuardStats(); err != nil { + log.Debugf("PeersList: refresh wg stats: %v", err) + } fullStatus := c.recorder.GetFullStatus() peerInfos := make([]PeerInfo, len(fullStatus.Peers)) for n, p := range fullStatus.Peers { pi := PeerInfo{ - p.IP, - p.FQDN, - int(p.ConnStatus), - PeerRoutes{routes: maps.Keys(p.GetRoutes())}, + IP: p.IP, + FQDN: p.FQDN, + ConnStatus: int(p.ConnStatus), + Routes: PeerRoutes{routes: maps.Keys(p.GetRoutes())}, + } + + // Phase 3.7i (#5989): enrichment fields. 
+ pi.Relayed = p.Relayed + pi.ServerOnline = p.ServerOnline + pi.LocalIceCandidateEndpoint = p.LocalIceCandidateEndpoint + pi.RemoteIceCandidateEndpoint = p.RemoteIceCandidateEndpoint + pi.RelayServerAddress = p.RelayServerAddress + if !p.LastWireguardHandshake.IsZero() { + pi.LastWireguardHandshake = p.LastWireguardHandshake.Format(time.RFC3339) + } + if !p.RemoteLastSeenAtServer.IsZero() { + pi.LastSeenAtServer = p.RemoteLastSeenAtServer.Format(time.RFC3339) + } + pi.LatencyMs = p.Latency.Milliseconds() + pi.BytesRx = p.BytesRx + pi.BytesTx = p.BytesTx + pi.EffectiveConnectionMode = p.RemoteEffectiveConnectionMode + pi.ConfiguredConnectionMode = p.RemoteConfiguredConnectionMode + if len(p.RemoteGroups) > 0 { + pi.Groups = strings.Join(p.RemoteGroups, ",") } + // AgentVersion / OsVersion: peer.State does not expose these fields; + // left empty until daemon surfaces them (future phase). + peerInfos[n] = pi } return &PeerInfoArray{items: peerInfos} @@ -394,6 +428,102 @@ func (c *Client) RemoveConnectionListener() { c.recorder.RemoveConnectionListener() } +// GetServerPushedConnectionMode returns the canonical name of the +// connection mode the management server most recently pushed via +// PeerConfig (independent of any local profile/env override). Returns +// an empty string when the engine has not connected yet or the server +// has not pushed a value -- the Android UI then knows to display +// just "Follow server" without the (currently: ...) suffix. +func (c *Client) GetServerPushedConnectionMode() string { + cm := c.connMgrSafe() + if cm == nil { + return "" + } + return cm.ServerPushedMode().String() +} + +// GetServerPushedRelayTimeoutSecs returns the relay timeout in seconds +// most recently pushed by the management server, or 0 when no value +// has been received. Used by the Android UI as a hint. 
+func (c *Client) GetServerPushedRelayTimeoutSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedRelayTimeoutSecs()) +} + +// GetServerPushedP2pTimeoutSecs returns the ICE-only timeout (seconds) +// most recently pushed by the management server. +func (c *Client) GetServerPushedP2pTimeoutSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedP2pTimeoutSecs()) +} + +// GetServerPushedP2pRetryMaxSecs returns the ICE-backoff cap (seconds) +// most recently pushed by the management server. +func (c *Client) GetServerPushedP2pRetryMaxSecs() int64 { + cm := c.connMgrSafe() + if cm == nil { + return 0 + } + return int64(cm.ServerPushedP2pRetryMaxSecs()) +} + +// GetConfiguredPeersTotal returns the total number of configured peers +// (server-online + server-offline). Phase 3.7i (#5989). +func (c *Client) GetConfiguredPeersTotal() int64 { + return int64(c.recorder.GetFullStatus().ConfiguredPeersTotal) +} + +// GetServerOnlinePeers returns the number of peers that are reachable via +// the server (P2P + Relayed + Idle). Phase 3.7i (#5989). +func (c *Client) GetServerOnlinePeers() int64 { + return int64(c.recorder.GetFullStatus().ServerOnlinePeers) +} + +// GetP2PConnectedPeers returns the number of peers connected via direct +// P2P (ICE). Phase 3.7i (#5989). +func (c *Client) GetP2PConnectedPeers() int64 { + return int64(c.recorder.GetFullStatus().P2PConnectedPeers) +} + +// GetRelayedConnectedPeers returns the number of peers connected via relay. +// Phase 3.7i (#5989). +func (c *Client) GetRelayedConnectedPeers() int64 { + return int64(c.recorder.GetFullStatus().RelayedConnectedPeers) +} + +// GetIdleOnlinePeers returns the number of peers that are online on the +// server but have no active connection yet. Phase 3.7i (#5989). 
+func (c *Client) GetIdleOnlinePeers() int64 { + return int64(c.recorder.GetFullStatus().IdleOnlinePeers) +} + +// GetServerOfflinePeers returns the number of peers that are not reachable +// via the server. Phase 3.7i (#5989). +func (c *Client) GetServerOfflinePeers() int64 { + return int64(c.recorder.GetFullStatus().ServerOfflinePeers) +} + +// connMgrSafe is a small helper that walks the Client -> ConnectClient +// -> Engine -> ConnMgr chain and returns nil at the first nil pointer. +// Each accessor that surfaces engine state to the Android UI uses it. +func (c *Client) connMgrSafe() *internal.ConnMgr { + cc := c.getConnectClient() + if cc == nil { + return nil + } + engine := cc.Engine() + if engine == nil { + return nil + } + return engine.ConnMgr() +} + func (c *Client) toggleRoute(command routeCommand) error { return command.toggleRoute() } diff --git a/client/android/peer_notifier.go b/client/android/peer_notifier.go index 4ec22f3ab45..1223da48b6a 100644 --- a/client/android/peer_notifier.go +++ b/client/android/peer_notifier.go @@ -17,6 +17,24 @@ type PeerInfo struct { FQDN string ConnStatus int Routes PeerRoutes + + // Phase 3.7i (#5989): per-peer enrichment fields. Strings for + // gomobile-friendliness (no time.Time / no []string). 
+ Relayed bool + ServerOnline bool + LocalIceCandidateEndpoint string + RemoteIceCandidateEndpoint string + RelayServerAddress string + LastWireguardHandshake string // RFC3339; "" if zero + LastSeenAtServer string // RFC3339; "" if zero + LatencyMs int64 + BytesRx int64 + BytesTx int64 + EffectiveConnectionMode string + ConfiguredConnectionMode string + Groups string // comma-separated + AgentVersion string + OsVersion string } func (p *PeerInfo) GetPeerRoutes() *PeerRoutes { diff --git a/client/android/preferences.go b/client/android/preferences.go index c3c8eb3fbc9..79ea843895f 100644 --- a/client/android/preferences.go +++ b/client/android/preferences.go @@ -307,6 +307,91 @@ func (p *Preferences) SetBlockInbound(block bool) { p.configInput.BlockInbound = &block } +// GetConnectionMode returns the locally configured connection-mode override +// (canonical lower-kebab-case: "relay-forced", "p2p", "p2p-lazy", +// "p2p-dynamic", "follow-server"), or empty string if no local override +// is configured -- the daemon will then follow the server-pushed value. +func (p *Preferences) GetConnectionMode() (string, error) { + if p.configInput.ConnectionMode != nil { + return *p.configInput.ConnectionMode, nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return "", err + } + return cfg.ConnectionMode, nil +} + +// SetConnectionMode stores a local override for the connection mode. +// Pass an empty string to clear the override (revert to following the +// server-pushed value). +func (p *Preferences) SetConnectionMode(mode string) { + m := mode + p.configInput.ConnectionMode = &m +} + +// GetRelayTimeoutSeconds returns the locally configured relay-worker +// inactivity timeout in seconds, or 0 if no override is set (follow +// server-pushed value, or built-in default if the server has none). 
+func (p *Preferences) GetRelayTimeoutSeconds() (int64, error) { + if p.configInput.RelayTimeoutSeconds != nil { + return int64(*p.configInput.RelayTimeoutSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.RelayTimeoutSeconds), nil +} + +// SetRelayTimeoutSeconds stores a local override for the relay timeout. +// Pass 0 to clear the override. +func (p *Preferences) SetRelayTimeoutSeconds(secs int64) { + v := uint32(secs) + p.configInput.RelayTimeoutSeconds = &v +} + +// GetP2pTimeoutSeconds returns the locally configured ICE-worker +// inactivity timeout in seconds (only effective in p2p-dynamic mode), +// or 0 if no override is set. +func (p *Preferences) GetP2pTimeoutSeconds() (int64, error) { + if p.configInput.P2pTimeoutSeconds != nil { + return int64(*p.configInput.P2pTimeoutSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.P2pTimeoutSeconds), nil +} + +// SetP2pTimeoutSeconds stores a local override for the p2p timeout. +// Pass 0 to clear the override. +func (p *Preferences) SetP2pTimeoutSeconds(secs int64) { + v := uint32(secs) + p.configInput.P2pTimeoutSeconds = &v +} + +// GetP2pRetryMaxSeconds returns the locally configured cap on the +// per-peer ICE-failure backoff schedule, or 0 if no override is set. +func (p *Preferences) GetP2pRetryMaxSeconds() (int64, error) { + if p.configInput.P2pRetryMaxSeconds != nil { + return int64(*p.configInput.P2pRetryMaxSeconds), nil + } + cfg, err := profilemanager.ReadConfig(p.configInput.ConfigPath) + if err != nil { + return 0, err + } + return int64(cfg.P2pRetryMaxSeconds), nil +} + +// SetP2pRetryMaxSeconds stores a local override for the backoff cap. +// Pass 0 to clear the override. 
+func (p *Preferences) SetP2pRetryMaxSeconds(secs int64) { + v := uint32(secs) + p.configInput.P2pRetryMaxSeconds = &v +} + // Commit writes out the changes to the config file func (p *Preferences) Commit() error { _, err := profilemanager.UpdateOrCreateConfig(p.configInput) diff --git a/client/cmd/root.go b/client/cmd/root.go index 29d4328a1f7..a4e8e934976 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -39,6 +39,10 @@ const ( extraIFaceBlackListFlag = "extra-iface-blacklist" dnsRouteIntervalFlag = "dns-router-interval" enableLazyConnectionFlag = "enable-lazy-connection" + connectionModeFlag = "connection-mode" + relayTimeoutFlag = "relay-timeout" + p2pTimeoutFlag = "p2p-timeout" + p2pRetryMaxFlag = "p2p-retry-max" mtuFlag = "mtu" ) @@ -72,6 +76,10 @@ var ( anonymizeFlag bool dnsRouteInterval time.Duration lazyConnEnabled bool + connectionMode string + relayTimeoutSecs uint32 + p2pTimeoutSecs uint32 + p2pRetryMaxSecs uint32 mtu uint16 profilesDisabled bool updateSettingsDisabled bool @@ -192,6 +200,15 @@ func init() { upCmd.PersistentFlags().BoolVar(&rosenpassPermissive, rosenpassPermissiveFlag, false, "[Experimental] Enable Rosenpass in permissive mode to allow this peer to accept WireGuard connections without requiring Rosenpass functionality from peers that do not have Rosenpass enabled.") upCmd.PersistentFlags().BoolVar(&autoConnectDisabled, disableAutoConnectFlag, false, "Disables auto-connect feature. If enabled, then the client won't connect automatically when the service starts.") upCmd.PersistentFlags().BoolVar(&lazyConnEnabled, enableLazyConnectionFlag, false, "[Experimental] Enable the lazy connection feature. If enabled, the client will establish connections on-demand. Note: this setting may be overridden by management configuration.") + upCmd.PersistentFlags().StringVar(&connectionMode, connectionModeFlag, "", + "[Experimental] Peer connection mode: relay-forced, p2p, p2p-lazy, p2p-dynamic, or follow-server. 
"+ + "Overrides the server-pushed value when set. Use follow-server to clear a previously-set local override.") + upCmd.PersistentFlags().Uint32Var(&relayTimeoutSecs, relayTimeoutFlag, 0, + "[Experimental] Relay-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default).") + upCmd.PersistentFlags().Uint32Var(&p2pTimeoutSecs, p2pTimeoutFlag, 0, + "[Experimental] ICE-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default). Only effective in p2p-dynamic mode (Phase 2).") + upCmd.PersistentFlags().Uint32Var(&p2pRetryMaxSecs, p2pRetryMaxFlag, 0, + "[Experimental] Maximum ICE-failure-backoff interval in seconds. 0 = use server-pushed value (or built-in default 15 min). Effective in p2p-dynamic mode (Phase 3 of #5989).") } diff --git a/client/cmd/service.go b/client/cmd/service.go index 56d8a8726fa..f8e6e97fecd 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -57,6 +57,24 @@ func init() { installCmd.Flags().StringSliceVar(&serviceEnvVars, "service-env", nil, serviceEnvDesc) reconfigureCmd.Flags().StringSliceVar(&serviceEnvVars, "service-env", nil, serviceEnvDesc) + // Profile-level connection-mode + timeout flags. Same semantics as on + // `netbird up` but writeable at install time so server/headless + // installs can pre-seed the active profile before the daemon starts. + // Same package-level vars are shared with upCmd; on `up` they take + // effect through setupConfig(), here we apply them once before + // installing the service so the daemon picks them up on first run. + for _, c := range []*cobra.Command{installCmd, reconfigureCmd} { + c.Flags().StringVar(&connectionMode, connectionModeFlag, "", + "[Experimental] Peer connection mode: relay-forced, p2p, p2p-lazy, p2p-dynamic, or follow-server. "+ + "Overrides the server-pushed value when set. 
Use follow-server to clear a previously-set local override.") + c.Flags().Uint32Var(&relayTimeoutSecs, relayTimeoutFlag, 0, + "[Experimental] Relay-worker idle timeout in seconds. 0 = use server-pushed value (or built-in default).") + c.Flags().Uint32Var(&p2pTimeoutSecs, p2pTimeoutFlag, 0, + "[Experimental] ICE-worker idle timeout in seconds. 0 = use server-pushed value. Only effective in p2p-dynamic mode.") + c.Flags().Uint32Var(&p2pRetryMaxSecs, p2pRetryMaxFlag, 0, + "[Experimental] Maximum ICE-failure-backoff interval in seconds. 0 = use server-pushed value (or built-in default 15 min).") + } + rootCmd.AddCommand(serviceCmd) } diff --git a/client/cmd/service_installer.go b/client/cmd/service_installer.go index 2d45fa063d8..449c910ff51 100644 --- a/client/cmd/service_installer.go +++ b/client/cmd/service_installer.go @@ -15,6 +15,7 @@ import ( "github.com/kardianos/service" "github.com/spf13/cobra" + "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/util" ) @@ -131,6 +132,12 @@ var installCmd = &cobra.Command{ cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) } + // Persist any profile-level connection-mode/timeout flags that + // were explicitly set so the daemon picks them up on first start. + if err := applyConnectionModeFlagsToProfile(cmd); err != nil { + cmd.PrintErrf("Warning: failed to persist connection-mode flags: %v\n", err) + } + svcConfig, err := createServiceConfigForInstall() if err != nil { return err @@ -157,6 +164,52 @@ var installCmd = &cobra.Command{ }, } +// applyConnectionModeFlagsToProfile writes the connection-mode + +// timeout flags into the active profile's config file so the daemon +// will use them on its next startup. Only fields whose flag was +// explicitly set are touched; missing flags leave the existing +// profile values intact. Used by install + reconfigure so headless +// deployments can pre-seed everything in a single command. 
+func applyConnectionModeFlagsToProfile(cmd *cobra.Command) error { + anyChanged := false + for _, name := range []string{connectionModeFlag, relayTimeoutFlag, p2pTimeoutFlag, p2pRetryMaxFlag} { + if f := cmd.Flag(name); f != nil && f.Changed { + anyChanged = true + break + } + } + if !anyChanged { + return nil + } + + cfgPath := profilemanager.DefaultConfigPath + if configPath != "" { + cfgPath = configPath + } + if cfgPath == "" { + return fmt.Errorf("default config path is not set on this platform; pass --config") + } + + ic := profilemanager.ConfigInput{ConfigPath: cfgPath} + if cmd.Flag(connectionModeFlag).Changed { + ic.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + ic.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + ic.P2pTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + ic.P2pRetryMaxSeconds = &p2pRetryMaxSecs + } + if _, err := profilemanager.UpdateOrCreateConfig(ic); err != nil { + return fmt.Errorf("write profile %s: %w", cfgPath, err) + } + cmd.Println("connection-mode/timeout flags persisted to profile:", cfgPath) + return nil +} + var uninstallCmd = &cobra.Command{ Use: "uninstall", Short: "uninstalls NetBird service from system", @@ -207,6 +260,10 @@ This command will temporarily stop the service, update its configuration, and re cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) } + if err := applyConnectionModeFlagsToProfile(cmd); err != nil { + cmd.PrintErrf("Warning: failed to persist connection-mode flags: %v\n", err) + } + wasRunning, err := isServiceRunning() if err != nil && !errors.Is(err, ErrGetServiceStatus) { return fmt.Errorf("check service status: %w", err) diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index c24965e8d82..2bcae0717fe 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -21,6 +21,7 @@ import ( 
"github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/job" + "github.com/netbirdio/netbird/management/server/peer_connections" clientProto "github.com/netbirdio/netbird/client/proto" client "github.com/netbirdio/netbird/client/server" @@ -135,7 +136,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil, nil, peer_connections.NewMemoryStore(5*time.Minute), peer_connections.NewSnapshotRouter()) if err != nil { t.Fatal(err) } diff --git a/client/cmd/up.go b/client/cmd/up.go index f4136cb2343..cba3edddee9 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -439,6 +439,19 @@ func setupSetConfigReq(customDNSAddressConverted []byte, cmd *cobra.Command, pro req.LazyConnectionEnabled = &lazyConnEnabled } + if cmd.Flag(connectionModeFlag).Changed { + req.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + req.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + req.P2PTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + req.P2PRetryMaxSeconds = &p2pRetryMaxSecs + } + return &req } @@ -555,6 +568,19 @@ func setupConfig(customDNSAddressConverted []byte, cmd *cobra.Command, configFil if cmd.Flag(enableLazyConnectionFlag).Changed { ic.LazyConnectionEnabled = &lazyConnEnabled } + + if cmd.Flag(connectionModeFlag).Changed { + ic.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + 
ic.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + ic.P2pTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + ic.P2pRetryMaxSeconds = &p2pRetryMaxSecs + } return &ic, nil } @@ -669,6 +695,19 @@ func setupLoginRequest(providedSetupKey string, customDNSAddressConverted []byte if cmd.Flag(enableLazyConnectionFlag).Changed { loginRequest.LazyConnectionEnabled = &lazyConnEnabled } + + if cmd.Flag(connectionModeFlag).Changed { + loginRequest.ConnectionMode = &connectionMode + } + if cmd.Flag(relayTimeoutFlag).Changed { + loginRequest.RelayTimeoutSeconds = &relayTimeoutSecs + } + if cmd.Flag(p2pTimeoutFlag).Changed { + loginRequest.P2PTimeoutSeconds = &p2pTimeoutSecs + } + if cmd.Flag(p2pRetryMaxFlag).Changed { + loginRequest.P2PRetryMaxSeconds = &p2pRetryMaxSecs + } return &loginRequest, nil } diff --git a/client/internal/conn_mgr.go b/client/internal/conn_mgr.go index 112559132a1..159b5535be1 100644 --- a/client/internal/conn_mgr.go +++ b/client/internal/conn_mgr.go @@ -14,6 +14,8 @@ import ( "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/peerstore" "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/connectionmode" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) // ConnMgr coordinates both lazy connections (established on-demand) and permanent peer connections. @@ -28,9 +30,44 @@ type ConnMgr struct { peerStore *peerstore.Store statusRecorder *peer.Status iface lazyconn.WGIface - enabledLocally bool rosenpassEnabled bool + // Resolved values used to drive lifecycle decisions. Updated when + // the management server pushes a new PeerConfig. + mode connectionmode.Mode + relayTimeoutSecs uint32 + // Phase 2 (#5989): ICE-only inactivity timeout (seconds). Used in + // ModeP2PDynamic to teardown the ICE worker without affecting the + // relay tunnel. 0 = ICE never times out. 
+ p2pTimeoutSecs uint32 + // Phase 3 (#5989): maximum seconds between P2P retry attempts. + // 0 means the daemon uses its built-in default. + p2pRetryMaxSecs uint32 + + // Raw inputs kept so we can re-resolve when server-pushed value changes. + envMode connectionmode.Mode + envRelayTimeout uint32 + cfgMode connectionmode.Mode + cfgRelayTimeout uint32 + cfgP2pTimeout uint32 + cfgP2pRetryMax uint32 + + // spMu protects all serverPushed* fields below. Written in + // UpdatedRemotePeerConfig (NetworkMap goroutine), read by + // ServerPushed*() accessors (daemon-RPC GetConfig goroutine). + spMu sync.RWMutex + + // serverPushedMode is the ConnectionMode value that was last received + // from the management server's PeerConfig (independent of any local + // env/cfg override). Updated in UpdatedRemotePeerConfig. Used by the + // Android UI to display "Follow server (currently: )" in the + // connection-mode override dropdown so users can see what they would + // inherit if they leave the override on "Follow server". + serverPushedMode connectionmode.Mode + serverPushedRelayTimeoutSecs uint32 + serverPushedP2pTimeoutSecs uint32 + serverPushedP2pRetryMaxSecs uint32 + lazyConnMgr *manager.Manager wg sync.WaitGroup @@ -39,72 +76,273 @@ type ConnMgr struct { } func NewConnMgr(engineConfig *EngineConfig, statusRecorder *peer.Status, peerStore *peerstore.Store, iface lazyconn.WGIface) *ConnMgr { - e := &ConnMgr{ + envMode, envRelayTimeout := peer.ResolveModeFromEnv() + + // First-pass resolution without server input -- updated later when + // the first NetworkMap arrives via UpdatedRemotePeerConfig. 
+ mode, relayTimeout, p2pTimeout, p2pRetryMax := resolveConnectionMode( + envMode, envRelayTimeout, + engineConfig.ConnectionMode, engineConfig.RelayTimeoutSeconds, + engineConfig.P2pTimeoutSeconds, + engineConfig.P2pRetryMaxSeconds, + nil, + ) + + return &ConnMgr{ peerStore: peerStore, statusRecorder: statusRecorder, iface: iface, rosenpassEnabled: engineConfig.RosenpassEnabled, + mode: mode, + relayTimeoutSecs: relayTimeout, + p2pTimeoutSecs: p2pTimeout, + p2pRetryMaxSecs: p2pRetryMax, + envMode: envMode, + envRelayTimeout: envRelayTimeout, + cfgMode: engineConfig.ConnectionMode, + cfgRelayTimeout: engineConfig.RelayTimeoutSeconds, + cfgP2pTimeout: engineConfig.P2pTimeoutSeconds, + cfgP2pRetryMax: engineConfig.P2pRetryMaxSeconds, + } +} + +// resolveConnectionMode applies the spec-section-4.1 precedence chain: +// 1. client env (already resolved by caller via peer.ResolveModeFromEnv) +// 2. client config (from profile, including the FollowServer sentinel) +// 3. server-pushed PeerConfig.ConnectionMode (with UNSPECIFIED -> +// legacy LazyConnectionEnabled fallback) +// +// Returns the resolved Mode, the resolved relay-timeout in seconds, and +// the resolved p2p-timeout in seconds. 0 for either timeout means the +// caller should use its built-in default. 
+func resolveConnectionMode( + envMode connectionmode.Mode, + envRelayTimeout uint32, + cfgMode connectionmode.Mode, + cfgRelayTimeout uint32, + cfgP2pTimeout uint32, + cfgP2pRetryMax uint32, + serverPC *mgmProto.PeerConfig, +) (connectionmode.Mode, uint32, uint32, uint32) { + mode := envMode + if mode == connectionmode.ModeUnspecified { + if cfgMode != connectionmode.ModeUnspecified && cfgMode != connectionmode.ModeFollowServer { + mode = cfgMode + } + } + if mode == connectionmode.ModeUnspecified { + if serverPC != nil { + serverMode := connectionmode.FromProto(serverPC.GetConnectionMode()) + if serverMode != connectionmode.ModeUnspecified { + mode = serverMode + } else { + mode = connectionmode.ResolveLegacyLazyBool(serverPC.GetLazyConnectionEnabled()) + } + } else { + mode = connectionmode.ModeP2P // safe default when nothing at all is known + } } - if engineConfig.LazyConnectionEnabled || lazyconn.IsLazyConnEnabledByEnv() { - e.enabledLocally = true + + // Relay-timeout precedence (analog). + relay := envRelayTimeout + if relay == 0 { + relay = cfgRelayTimeout + } + if relay == 0 && serverPC != nil { + relay = serverPC.GetRelayTimeoutSeconds() + } + + // P2P-timeout precedence: client config wins over server push. No env + // var in Phase 2; reserved for Phase 3. + p2p := cfgP2pTimeout + if p2p == 0 && serverPC != nil { + p2p = serverPC.GetP2PTimeoutSeconds() } - return e + + // P2pRetryMax resolution (analogous to p2p timeout): + // client-config wins over server-pushed value (0 = not set). + p2pRetryMax := cfgP2pRetryMax + if p2pRetryMax == 0 && serverPC != nil { + p2pRetryMax = serverPC.GetP2PRetryMaxSeconds() + } + + return mode, relay, p2p, p2pRetryMax } -// Start initializes the connection manager and starts the lazy connection manager if enabled by env var or cmd line option. +// Start initializes the connection manager. The lazy/dynamic connection +// manager is brought up immediately when the resolved Mode is P2PLazy +// or P2PDynamic. 
Other modes keep the manager dormant; it can still be +// activated later via UpdatedRemotePeerConfig. func (e *ConnMgr) Start(ctx context.Context) { if e.lazyConnMgr != nil { - log.Errorf("lazy connection manager is already started") + log.Errorf("lazy/dynamic connection manager is already started") return } - - if !e.enabledLocally { - log.Infof("lazy connection manager is disabled") + if !modeUsesLazyMgr(e.mode) { + log.Infof("lazy/dynamic connection manager is disabled (mode=%s)", e.mode) return } - if e.rosenpassEnabled { - log.Warnf("rosenpass connection manager is enabled, lazy connection manager will not be started") + log.Warnf("rosenpass enabled, lazy/dynamic connection manager will not be started") return } - e.initLazyManager(ctx) - e.statusRecorder.UpdateLazyConnection(true) + e.startModeSideEffects() } -// UpdatedRemoteFeatureFlag is called when the remote feature flag is updated. -// If enabled, it initializes the lazy connection manager and start it. Do not need to call Start() again. -// If disabled, then it closes the lazy connection manager and open the connections to all peers. -func (e *ConnMgr) UpdatedRemoteFeatureFlag(ctx context.Context, enabled bool) error { - // do not disable lazy connection manager if it was enabled by env var - if e.enabledLocally { - return nil +// modeUsesLazyMgr is true for the modes whose lifecycle is driven by the +// lazyconn.Manager (which now hosts the two-timer inactivity manager +// since Phase 2). Eager modes (p2p, relay-forced) do not need it. +func modeUsesLazyMgr(m connectionmode.Mode) bool { + return m == connectionmode.ModeP2PLazy || m == connectionmode.ModeP2PDynamic +} + +// startModeSideEffects flips the per-mode goroutines and status flags +// that need to follow a successful initLazyManager. Called by Start() +// and by the management-push transition path. 
+func (e *ConnMgr) startModeSideEffects() { + // Both lazy AND dynamic are "lazy" from the status-recorder's + // perspective (peers are not eagerly opened; they wait for activity). + // The "Lazy connection: true/false" line in `netbird status` reflects + // this user-visible distinction, not the internal flavor. + if e.mode == connectionmode.ModeP2PLazy || e.mode == connectionmode.ModeP2PDynamic { + e.statusRecorder.UpdateLazyConnection(true) + } + if e.mode == connectionmode.ModeP2PDynamic { + e.wg.Add(1) + go func() { + defer e.wg.Done() + e.runDynamicInactivityLoop(e.lazyCtx) + }() } +} - if enabled { - // if the lazy connection manager is already started, do not start it again - if e.lazyConnMgr != nil { - return nil +// runDynamicInactivityLoop reads from the two-timer inactivity channels +// exposed by the inactivity.Manager and dispatches per-peer teardown. +// +// ICEInactiveChan: detach the ICE worker for each listed peer; the +// relay tunnel is left running so traffic still flows. +// +// RelayInactiveChan: close the whole connection. The activity-detector +// will reopen it when the next outbound packet arrives. +// +// Only meaningful in p2p-dynamic mode; in p2p-lazy the iceTimeout is 0 +// and ICEInactiveChan never fires, so the loop is a passthrough. 
+func (e *ConnMgr) runDynamicInactivityLoop(ctx context.Context) { + if e.lazyConnMgr == nil { + return + } + im := e.lazyConnMgr.InactivityManager() + if im == nil { + return + } + log.Infof("p2p-dynamic inactivity loop started (iceTimeout=%ds, relayTimeout=%ds)", e.p2pTimeoutSecs, e.relayTimeoutSecs) + defer log.Infof("p2p-dynamic inactivity loop stopped") + for { + select { + case <-ctx.Done(): + return + case peers := <-im.ICEInactiveChan(): + for peerKey := range peers { + if err := e.DetachICEForPeer(peerKey); err != nil { + log.Warnf("DetachICEForPeer(%s): %v", peerKey, err) + } + } + case peers := <-im.RelayInactiveChan(): + for peerKey := range peers { + if conn, ok := e.peerStore.PeerConn(peerKey); ok { + conn.Log.Infof("relay-inactivity timeout, closing peer connection") + conn.Close(false) + } + } } + } +} - if e.rosenpassEnabled { - log.Infof("rosenpass connection manager is enabled, lazy connection manager will not be started") - return nil +// UpdatedRemotePeerConfig is called when the management server pushes a +// new PeerConfig. Re-resolves the effective mode through the precedence +// chain and starts/stops the lazy manager accordingly. +func (e *ConnMgr) UpdatedRemotePeerConfig(ctx context.Context, pc *mgmProto.PeerConfig) error { + // Capture the raw server-pushed values before resolution so the UI + // can surface them independently of any local override. 
+ if pc != nil { + serverMode := connectionmode.FromProto(pc.GetConnectionMode()) + if serverMode == connectionmode.ModeUnspecified { + serverMode = connectionmode.ResolveLegacyLazyBool(pc.GetLazyConnectionEnabled()) } + e.spMu.Lock() + e.serverPushedMode = serverMode + e.serverPushedRelayTimeoutSecs = pc.GetRelayTimeoutSeconds() + e.serverPushedP2pTimeoutSecs = pc.GetP2PTimeoutSeconds() + e.serverPushedP2pRetryMaxSecs = pc.GetP2PRetryMaxSeconds() + e.spMu.Unlock() + } - log.Warnf("lazy connection manager is enabled by management feature flag") - e.initLazyManager(ctx) - e.statusRecorder.UpdateLazyConnection(true) - return e.addPeersToLazyConnManager() - } else { - if e.lazyConnMgr == nil { - return nil - } - log.Infof("lazy connection manager is disabled by management feature flag") + newMode, newRelay, newP2P, newP2pRetry := resolveConnectionMode( + e.envMode, e.envRelayTimeout, e.cfgMode, e.cfgRelayTimeout, + e.cfgP2pTimeout, e.cfgP2pRetryMax, pc, + ) + + if newMode == e.mode && newRelay == e.relayTimeoutSecs && + newP2P == e.p2pTimeoutSecs && newP2pRetry == e.p2pRetryMaxSecs { + return nil + } + prev := e.mode + e.mode = newMode + e.relayTimeoutSecs = newRelay + e.p2pTimeoutSecs = newP2P + e.p2pRetryMaxSecs = newP2pRetry + e.propagateP2pRetryMaxToConns() + + wasManaged := modeUsesLazyMgr(prev) + isManaged := modeUsesLazyMgr(newMode) + modeChanged := prev != newMode + + if modeChanged && wasManaged && !isManaged { + log.Infof("lazy/dynamic connection manager disabled by management push (mode=%s)", newMode) e.closeManager(ctx) e.statusRecorder.UpdateLazyConnection(false) return nil } + + if modeChanged && wasManaged && isManaged { + // Switching between lazy and dynamic at runtime: tear down the + // existing manager so initLazyManager picks up the new timeouts. 
+ log.Infof("lazy/dynamic mode change %s -> %s, restarting manager", prev, newMode) + e.closeManager(ctx) + e.statusRecorder.UpdateLazyConnection(false) + } + + if isManaged && e.lazyConnMgr == nil { + if e.rosenpassEnabled { + log.Warnf("rosenpass enabled, ignoring lazy/dynamic mode push") + return nil + } + log.Infof("lazy/dynamic connection manager enabled by management push (mode=%s)", newMode) + e.initLazyManager(ctx) + e.startModeSideEffects() + // Phase 3.7i: when management activates lazy/dynamic mode at + // runtime we must reset all existing peer connections through + // the lazy/idle entry. The previous AddActivePeers path kept + // every already-open WireGuard tunnel running and only started + // the inactivity timers from "now" -- callers expected the new + // mode to apply immediately ("Idle until traffic"), not "stay + // open until 3 hours from now". Brief packet loss (~1-2 s per + // peer while the tunnel is rebuilt) is acceptable; mode changes + // are rare and almost always intentional. + return e.resetPeersToLazyIdle(ctx) + } + return nil +} + +// UpdatedRemoteFeatureFlag is the legacy entry point that only knows the +// boolean LazyConnectionEnabled field. Kept as a thin shim that builds a +// synthetic PeerConfig and delegates to UpdatedRemotePeerConfig. +// +// Deprecated: callers should switch to UpdatedRemotePeerConfig and pass +// the real PeerConfig so the new ConnectionMode + timeouts propagate. +func (e *ConnMgr) UpdatedRemoteFeatureFlag(ctx context.Context, enabled bool) error { + return e.UpdatedRemotePeerConfig(ctx, &mgmProto.PeerConfig{LazyConnectionEnabled: enabled}) } // UpdateRouteHAMap updates the route HA mappings in the lazy connection manager @@ -230,17 +468,89 @@ func (e *ConnMgr) ActivatePeer(ctx context.Context, conn *peer.Conn) { conn.Log.Errorf("failed to open connection: %v", err) } } + + // p2p-dynamic: re-attach ICE on EVERY signal trigger, not only on + // the lazy-manager's first activity edge. 
The runDynamicInactivityLoop + // path (DetachICEForPeer when iceTimeout fires) leaves the peer in an + // "inactivity-with-ICE-detached" sub-state that the lazy manager does + // not represent. Without this re-arm, subsequent remote OFFERs would + // reach handshaker.Listen() with iceListener==nil and be silently + // dropped, leaving the peer stuck on relay even though both sides + // are signaling normally. AttachICE is idempotent (no-op if listener + // already attached) and honors iceBackoff.IsSuspended() so the + // failure-backoff is not bypassed. + if e.mode == connectionmode.ModeP2PDynamic { + if err := conn.AttachICE(); err != nil { + conn.Log.Warnf("AttachICE on signal activity: %v", err) + } + } } -// DeactivatePeer deactivates a peer connection in the lazy connection manager. -// If locally the lazy connection is disabled, we force the peer connection open. +// deactivateAction selects what DeactivatePeer should do when the remote +// peer signals GO_IDLE. The dispatch is a pure function of the locally +// resolved connection mode. +type deactivateAction int + +const ( + deactivateNoop deactivateAction = iota + deactivateLazy + deactivateICE +) + +// deactivatePeerAction returns the per-mode deactivation rule. Eager +// modes (p2p, relay-forced, unspecified) ignore GO_IDLE because they +// are meant to keep tunnels always-on. p2p-lazy delegates to the lazy +// connection manager so the whole tunnel is torn down. p2p-dynamic +// detaches only the ICE worker so the relay tunnel stays up. +func (e *ConnMgr) deactivatePeerAction() deactivateAction { + switch e.mode { + case connectionmode.ModeP2PLazy: + return deactivateLazy + case connectionmode.ModeP2PDynamic: + return deactivateICE + default: + return deactivateNoop + } +} + +// DeactivatePeer is invoked when the remote peer signals GO_IDLE. The +// behavior is per-mode (see deactivatePeerAction). 
Phase 2 fix for the +// lazy/eager mismatch in #5989: previously this method silently no-op'd +// whenever the local manager was not in lazy mode, so a remote lazy +// peer's GO_IDLE was effectively dropped and the eager local end kept +// the peer awake. func (e *ConnMgr) DeactivatePeer(conn *peer.Conn) { - if !e.isStartedWithLazyMgr() { + switch e.deactivatePeerAction() { + case deactivateLazy: + if !e.isStartedWithLazyMgr() { + return + } + conn.Log.Infof("closing peer connection: remote peer initiated inactive, idle lazy state and sent GOAWAY") + e.lazyConnMgr.DeactivatePeer(conn.ConnID()) + case deactivateICE: + conn.Log.Infof("detaching ICE worker: remote peer signaled GO_IDLE (p2p-dynamic)") + if err := e.DetachICEForPeer(conn.GetKey()); err != nil { + conn.Log.Warnf("DetachICEForPeer failed: %v", err) + } + case deactivateNoop: + // Eager modes keep the tunnel up unconditionally. return } +} - conn.Log.Infof("closing peer connection: remote peer initiated inactive, idle lazy state and sent GOAWAY") - e.lazyConnMgr.DeactivatePeer(conn.ConnID()) +// DetachICEForPeer looks up the Conn for peerKey and tears down its +// ICE worker without touching the relay tunnel. Used by: +// - DeactivatePeer when the remote peer sends GO_IDLE (p2p-dynamic) +// - the inactivity manager when the iceTimeout elapses (wired in +// engine.go runDynamicInactivityLoop) +// +// Missing peers are not an error; they may have been removed concurrently. 
+func (e *ConnMgr) DetachICEForPeer(peerKey string) error { + conn, ok := e.peerStore.PeerConn(peerKey) + if !ok { + return nil + } + return conn.DetachICE() } func (e *ConnMgr) Close() { @@ -257,6 +567,12 @@ func (e *ConnMgr) initLazyManager(engineCtx context.Context) { cfg := manager.Config{ InactivityThreshold: inactivityThresholdEnv(), } + if e.relayTimeoutSecs > 0 { + cfg.RelayInactivityThreshold = time.Duration(e.relayTimeoutSecs) * time.Second + } + if e.mode == connectionmode.ModeP2PDynamic && e.p2pTimeoutSecs > 0 { + cfg.ICEInactivityThreshold = time.Duration(e.p2pTimeoutSecs) * time.Second + } e.lazyConnMgr = manager.NewManager(cfg, engineCtx, e.peerStore, e.iface) e.lazyCtx, e.lazyCtxCancel = context.WithCancel(engineCtx) @@ -268,6 +584,34 @@ func (e *ConnMgr) initLazyManager(engineCtx context.Context) { }() } +// propagateP2pRetryMaxToConns iterates all active Conn instances and +// updates their iceBackoff.SetMaxBackoff. Called when the server pushes +// a new value via UpdatedRemotePeerConfig. Phase 3 of #5989. +func (e *ConnMgr) propagateP2pRetryMaxToConns() { + const sentinelDisabled = ^uint32(0) + v := e.p2pRetryMaxSecs + var d time.Duration + switch v { + case sentinelDisabled: + d = 0 // user-explicit disable + case 0: + d = peer.DefaultP2PRetryMax // server NULL -> use daemon default + default: + d = time.Duration(v) * time.Second + } + for _, peerKey := range e.peerStore.PeersPubKey() { + if conn, ok := e.peerStore.PeerConn(peerKey); ok { + conn.SetIceBackoffMax(d) + } + } +} + +// addPeersToLazyConnManager is currently unused (callers were migrated +// to per-peer activation in Phase 2 of #5989). Kept for reference and +// for the eventual full-batch wakeup path; revisit when the lazyconn +// manager grows a snapshot-import API. 
+// +//nolint:unused // see comment above func (e *ConnMgr) addPeersToLazyConnManager() error { peers := e.peerStore.PeersPubKey() lazyPeerCfgs := make([]lazyconn.PeerConfig, 0, len(peers)) @@ -291,6 +635,65 @@ func (e *ConnMgr) addPeersToLazyConnManager() error { return e.lazyConnMgr.AddActivePeers(lazyPeerCfgs) } +// resetPeersToLazyIdle closes every currently-open peer connection and +// re-registers it via the standard AddPeer (idle) entry of the lazy +// manager. Used when management activates lazy/dynamic mode at runtime: +// without this, AddActivePeers would keep all existing tunnels running +// until their inactivity timers fired, contradicting the user-visible +// promise of lazy/dynamic ("idle until traffic"). +// +// Peers with daemon versions that don't support lazy connection, peers +// on the exclude list, and any AddPeer error fall back to eager Open() +// to preserve current behaviour for those edge cases. Net effect for +// the common case: every supported peer flips from Connected -> Idle +// and waits for the next outbound payload packet. +func (e *ConnMgr) resetPeersToLazyIdle(ctx context.Context) error { + for _, peerID := range e.peerStore.PeersPubKey() { + peerConn, ok := e.peerStore.PeerConn(peerID) + if !ok { + log.Warnf("failed to find peer conn for peerID: %s", peerID) + continue + } + + // Tear the tunnel down. signalToRemote=true so the remote peer + // also drops its half (otherwise it would keep the tunnel half- + // open until its own ICE backoff fired). 
+ peerConn.Close(true) + + if !lazyconn.IsSupported(peerConn.AgentVersionString()) { + peerConn.Log.Warnf("peer does not support lazy connection (%s), opening permanent connection after mode reset", peerConn.AgentVersionString()) + if err := peerConn.Open(ctx); err != nil { + peerConn.Log.Errorf("failed to re-open connection after mode reset: %v", err) + } + continue + } + + lazyPeerCfg := lazyconn.PeerConfig{ + PublicKey: peerID, + AllowedIPs: peerConn.WgConfig().AllowedIps, + PeerConnID: peerConn.ConnID(), + Log: peerConn.Log, + } + excluded, err := e.lazyConnMgr.AddPeer(lazyPeerCfg) + if err != nil { + peerConn.Log.Errorf("failed to add peer to lazy conn manager during mode reset: %v", err) + if err := peerConn.Open(ctx); err != nil { + peerConn.Log.Errorf("failed to re-open connection after AddPeer error: %v", err) + } + continue + } + if excluded { + peerConn.Log.Infof("peer is on lazy conn manager exclude list, opening connection after mode reset") + if err := peerConn.Open(ctx); err != nil { + peerConn.Log.Errorf("failed to re-open excluded peer after mode reset: %v", err) + } + continue + } + peerConn.Log.Infof("peer reset to idle by lazy/dynamic mode change") + } + return nil +} + func (e *ConnMgr) closeManager(ctx context.Context) { if e.lazyConnMgr == nil { return @@ -309,6 +712,79 @@ func (e *ConnMgr) isStartedWithLazyMgr() bool { return e.lazyConnMgr != nil && e.lazyCtxCancel != nil } +// Mode returns the currently resolved connection mode. Used by the engine +// when constructing per-peer connections (Phase 1 forwards it into +// peer.ConnConfig in a follow-up commit). +func (e *ConnMgr) Mode() connectionmode.Mode { + return e.mode +} + +// RelayTimeout returns the resolved relay-worker idle timeout in seconds. +func (e *ConnMgr) RelayTimeout() uint32 { + return e.relayTimeoutSecs +} + +// P2pRetryMax returns the resolved cap in seconds for the ICE-failure +// backoff schedule. 
Wire-format sentinel uint32-max means "user-explicit +// disable"; callers must translate that to 0. Phase 3 of #5989. +func (e *ConnMgr) P2pRetryMax() uint32 { + return e.p2pRetryMaxSecs +} + +// P2pTimeout returns the resolved ICE-only inactivity timeout in +// seconds. Phase 2 of #5989. 0 = ICE never times out (for non-dynamic +// modes). Phase 3.7i adds this accessor so the engine can include it +// in PeerSystemMeta. +func (e *ConnMgr) P2pTimeout() uint32 { + return e.p2pTimeoutSecs +} + +// ServerPushedMode returns the connection mode the management server +// most recently pushed via PeerConfig (independent of any local env +// or config override). Returns ModeUnspecified if no PeerConfig has +// been received yet. Used by the Android UI to display "Follow server +// (currently: )" in the override dropdown. +func (e *ConnMgr) ServerPushedMode() connectionmode.Mode { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedMode +} + +// ServerPushedRelayTimeoutSecs returns the relay-worker idle-timeout +// (seconds) most recently pushed by the management server, or 0 if no +// PeerConfig has been received. Used by the Android UI as a hint in +// the override field. +func (e *ConnMgr) ServerPushedRelayTimeoutSecs() uint32 { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedRelayTimeoutSecs +} + +// ServerPushedP2pTimeoutSecs returns the ICE-only inactivity timeout +// (seconds) most recently pushed by the management server. Only +// meaningful in p2p-dynamic mode. +func (e *ConnMgr) ServerPushedP2pTimeoutSecs() uint32 { + e.spMu.RLock() + defer e.spMu.RUnlock() + return e.serverPushedP2pTimeoutSecs +} + +// ServerPushedP2pRetryMaxSecs returns the ICE-failure backoff cap +// (seconds) most recently pushed by the management server. 
When the +// server has not pushed a value (Phase 1 management servers do not +// know about this field yet) the built-in DefaultP2PRetryMax is +// returned so the Android UI hint shows what value the daemon is +// actually using as fallback. +func (e *ConnMgr) ServerPushedP2pRetryMaxSecs() uint32 { + e.spMu.RLock() + v := e.serverPushedP2pRetryMaxSecs + e.spMu.RUnlock() + if v > 0 { + return v + } + return uint32(peer.DefaultP2PRetryMax / time.Second) +} + func inactivityThresholdEnv() *time.Duration { envValue := os.Getenv(lazyconn.EnvInactivityThreshold) if envValue == "" { diff --git a/client/internal/conn_mgr_test.go b/client/internal/conn_mgr_test.go new file mode 100644 index 00000000000..21f0c93d523 --- /dev/null +++ b/client/internal/conn_mgr_test.go @@ -0,0 +1,221 @@ +package internal + +import ( + "testing" + + "github.com/netbirdio/netbird/client/internal/peerstore" + "github.com/netbirdio/netbird/shared/connectionmode" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestResolveConnectionMode(t *testing.T) { + cases := []struct { + name string + envMode connectionmode.Mode + envTimeout uint32 + cfgMode connectionmode.Mode + cfgRelayTimeout uint32 + cfgP2pTimeout uint32 + serverPC *mgmProto.PeerConfig + wantMode connectionmode.Mode + wantRelay uint32 + wantP2P uint32 + }{ + { + name: "all unspecified, server says legacy false -> P2P", + serverPC: &mgmProto.PeerConfig{LazyConnectionEnabled: false}, + wantMode: connectionmode.ModeP2P, + }, + { + name: "all unspecified, server says legacy true -> P2P_LAZY", + serverPC: &mgmProto.PeerConfig{LazyConnectionEnabled: true}, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "server pushes new enum -> wins over legacy bool", + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + LazyConnectionEnabled: false, + }, + wantMode: connectionmode.ModeRelayForced, + }, + { + name: "client config overrides server", + cfgMode: 
connectionmode.ModeP2PLazy, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + }, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "follow-server in client config clears local override", + cfgMode: connectionmode.ModeFollowServer, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + }, + wantMode: connectionmode.ModeP2PLazy, + }, + { + name: "env var beats client config", + envMode: connectionmode.ModeRelayForced, + cfgMode: connectionmode.ModeP2PLazy, + serverPC: &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + }, + wantMode: connectionmode.ModeRelayForced, + }, + { + name: "env timeout beats server timeout", + envTimeout: 42, + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 100}, + wantMode: connectionmode.ModeP2P, + wantRelay: 42, + }, + { + name: "client config timeout beats server", + cfgRelayTimeout: 50, + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 200}, + wantMode: connectionmode.ModeP2P, + wantRelay: 50, + }, + { + name: "no env, no client, only server timeout", + serverPC: &mgmProto.PeerConfig{RelayTimeoutSeconds: 300}, + wantMode: connectionmode.ModeP2P, + wantRelay: 300, + }, + { + name: "nil serverPC defaults to P2P", + serverPC: nil, + wantMode: connectionmode.ModeP2P, + }, + { + name: "p2p-dynamic with server-pushed timeouts", + serverPC: &mgmProto.PeerConfig{ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, P2PTimeoutSeconds: 10800, RelayTimeoutSeconds: 86400}, + wantMode: connectionmode.ModeP2PDynamic, wantRelay: 86400, wantP2P: 10800, + }, + { + name: "client config p2p-timeout beats server", + cfgP2pTimeout: 555, + serverPC: &mgmProto.PeerConfig{ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, P2PTimeoutSeconds: 9999}, + wantMode: connectionmode.ModeP2PDynamic, wantP2P: 555, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + 
gotMode, gotRelay, gotP2P, _ := resolveConnectionMode(c.envMode, c.envTimeout, c.cfgMode, c.cfgRelayTimeout, c.cfgP2pTimeout, 0, c.serverPC) + if gotMode != c.wantMode { + t.Errorf("mode = %v, want %v", gotMode, c.wantMode) + } + if gotRelay != c.wantRelay { + t.Errorf("relay-timeout = %v, want %v", gotRelay, c.wantRelay) + } + if gotP2P != c.wantP2P { + t.Errorf("p2p-timeout = %v, want %v", gotP2P, c.wantP2P) + } + }) + } +} + +func TestResolveConnectionMode_P2pRetryMax_NotSet(t *testing.T) { + // serverPC has 0 (= "not set") -> result is 0, daemon will use default + mode, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, 0, + &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + P2PRetryMaxSeconds: 0, + }, + ) + if mode != connectionmode.ModeP2PDynamic { + t.Errorf("expected p2p-dynamic, got %v", mode) + } + if retryMax != 0 { + t.Errorf("server-pushed 0 should pass through as 0, got %d", retryMax) + } +} + +func TestResolveConnectionMode_P2pRetryMax_ServerSet(t *testing.T) { + mode, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, 0, + &mgmProto.PeerConfig{ + ConnectionMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + P2PRetryMaxSeconds: 600, + }, + ) + if mode != connectionmode.ModeP2PDynamic { + t.Fatalf("expected p2p-dynamic, got %v", mode) + } + if retryMax != 600 { + t.Errorf("server-pushed 600 should win, got %d", retryMax) + } +} + +func TestResolveConnectionMode_P2pRetryMax_ClientCfgWins(t *testing.T) { + _, _, _, retryMax := resolveConnectionMode( + connectionmode.ModeUnspecified, 0, + connectionmode.ModeUnspecified, 0, 0, + 300, // cfgP2pRetryMax (client-side override) + &mgmProto.PeerConfig{ + P2PRetryMaxSeconds: 600, + }, + ) + if retryMax != 300 { + t.Errorf("client cfg should override server push, got %d", retryMax) + } +} + +// 
TestConnMgr_DetachICEForPeer_NotFound verifies that detaching ICE +// for a peer not in the store is a no-op (no error). The lookup miss +// can happen if a peer is removed concurrently with a GO_IDLE signal +// or an inactivity-manager fire. +func TestConnMgr_DetachICEForPeer_NotFound(t *testing.T) { + mgr := &ConnMgr{peerStore: peerstore.NewConnStore()} + + if err := mgr.DetachICEForPeer("unknown-peer-key"); err != nil { + t.Fatalf("DetachICEForPeer for unknown peer should be no-op, got %v", err) + } +} + +// TestConnMgr_deactivatePeerAction verifies the per-mode dispatch rule: +// p2p-dynamic detaches ICE, p2p-lazy delegates to the lazy manager, +// eager modes (p2p, relay-forced) are silent no-ops. This is the core +// fix for the lazy/eager mismatch (Phase 2 #5989). +func TestConnMgr_deactivatePeerAction(t *testing.T) { + cases := []struct { + mode connectionmode.Mode + want deactivateAction + }{ + {connectionmode.ModeP2P, deactivateNoop}, + {connectionmode.ModeRelayForced, deactivateNoop}, + {connectionmode.ModeUnspecified, deactivateNoop}, + {connectionmode.ModeP2PLazy, deactivateLazy}, + {connectionmode.ModeP2PDynamic, deactivateICE}, + } + for _, c := range cases { + t.Run(c.mode.String(), func(t *testing.T) { + mgr := &ConnMgr{mode: c.mode} + if got := mgr.deactivatePeerAction(); got != c.want { + t.Errorf("mode=%v action=%v want=%v", c.mode, got, c.want) + } + }) + } +} + +func TestConnMgr_ServerPushedFieldsAreRaceSafe(t *testing.T) { + cm := &ConnMgr{} + done := make(chan struct{}) + go func() { + for i := 0; i < 1000; i++ { + cm.spMu.Lock() + cm.serverPushedRelayTimeoutSecs = uint32(i) + cm.spMu.Unlock() + } + close(done) + }() + for i := 0; i < 1000; i++ { + _ = cm.ServerPushedRelayTimeoutSecs() + } + <-done +} diff --git a/client/internal/conn_state_pusher.go b/client/internal/conn_state_pusher.go new file mode 100644 index 00000000000..8977191be3a --- /dev/null +++ b/client/internal/conn_state_pusher.go @@ -0,0 +1,332 @@ +package internal + +import 
( + "context" + "sync" + "time" + + "google.golang.org/protobuf/types/known/timestamppb" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// PeerStateChangeEvent is the per-peer connection-state snapshot the +// pusher receives from the engine. Phase 3.7i of #5989. +type PeerStateChangeEvent struct { + Pubkey string + ConnType mgmProto.ConnType + LastHandshake time.Time + LatencyMS uint32 + Endpoint string + RelayServer string + RxBytes uint64 + TxBytes uint64 +} + +// PushSink is the upstream Sync mgmt-client interface the pusher writes +// to. The Engine's mgmClient.SyncPeerConnections satisfies it. +type PushSink interface { + Push(ctx context.Context, m *mgmProto.PeerConnectionMap) error +} + +// PeerStateSource produces the current full snapshot of per-peer state +// when the pusher needs to compute a delta or build a full snapshot. +// The Engine's statusRecorder snapshot satisfies it. +type PeerStateSource interface { + SnapshotAllRemotePeers() []PeerStateChangeEvent +} + +type pusherTuning struct { + baseInterval time.Duration + maxInterval time.Duration + doubleAfter int +} + +var defaultTuning = pusherTuning{ + baseInterval: 60 * time.Second, + maxInterval: 300 * time.Second, + doubleAfter: 3, +} + +type connStatePusher struct { + sink PushSink + source PeerStateSource + tuning pusherTuning + + mu sync.Mutex + lastPushed map[string]PeerStateChangeEvent + seq uint64 + + events chan PeerStateChangeEvent + snapshotReq chan uint64 + initialReady chan struct{} // closed by TriggerInitialSnapshot + stop chan struct{} + wg sync.WaitGroup +} + +func newConnStatePusher(sink PushSink, source PeerStateSource) *connStatePusher { + return newConnStatePusherForTest(sink, source, defaultTuning) +} + +func newConnStatePusherForTest(sink PushSink, source PeerStateSource, t pusherTuning) *connStatePusher { + p := &connStatePusher{ + sink: sink, + source: source, + tuning: t, + lastPushed: make(map[string]PeerStateChangeEvent), + events: make(chan 
PeerStateChangeEvent, 64), + snapshotReq: make(chan uint64, 4), + initialReady: make(chan struct{}), + stop: make(chan struct{}), + } + p.wg.Add(1) + go p.loop() + return p +} + +// Stop cancels the loop goroutine and blocks until it exits. Idempotent +// at the close-channel level (calling Stop twice panics — caller's +// responsibility to call once). +func (p *connStatePusher) Stop() { + close(p.stop) + p.wg.Wait() +} + +// OnPeerStateChange enqueues a state-change event. Non-blocking — drops +// if the buffer is full (the next bulk tick will catch up via delta). +// +// Safe on a nil receiver: Engine.Stop nils e.connStatePusher before +// removeAllPeers runs, but the status-recorder listener registered in +// Engine.Start is still wired and may fire a few more events during +// peer cleanup. A nil-receiver no-op makes the cleanup path cheap and +// avoids a panic on the engine shutdown race. +func (p *connStatePusher) OnPeerStateChange(ev PeerStateChangeEvent) { + if p == nil { + return + } + select { + case p.events <- ev: + default: + } +} + +// OnSnapshotRequest enqueues a snapshot-request nonce. Non-blocking, +// coalescing — multiple requests in flight result in a single full +// snapshot with the latest nonce echoed. Nil-receiver safe for the +// same shutdown-race reason as OnPeerStateChange. +func (p *connStatePusher) OnSnapshotRequest(nonce uint64) { + if p == nil { + return + } + select { + case p.snapshotReq <- nonce: + default: + } +} + +// TriggerInitialSnapshot signals the loop that the engine has populated +// the peer-state source for the first time and the loop may now send +// its initial full snapshot to management. Idempotent — subsequent +// calls are no-ops. 
+// +// Without this, newConnStatePusher's loop would race with the engine's +// peer-population path: starting in engine.Start (before addNewPeers +// has run for the first NetworkMap), it would emit an empty snapshot, +// and management would not see real peers until either a state change +// or the 60 s heartbeat tick. +func (p *connStatePusher) TriggerInitialSnapshot() { + p.mu.Lock() + defer p.mu.Unlock() + select { + case <-p.initialReady: + // already triggered + default: + close(p.initialReady) + } +} + +func (p *connStatePusher) loop() { + defer p.wg.Done() + // Wait until the engine signals that the first NetworkMap has been + // applied (peers populated). Sending an initial full snapshot before + // peers exist would publish an empty map to management, which would + // only get repaired on the next per-peer state change or after the + // 60 s heartbeat. Bail out cleanly if Stop is called first. + select { + case <-p.initialReady: + case <-p.stop: + return + } + if p.source != nil { + p.flushFull(p.source.SnapshotAllRemotePeers(), 0) + } + interval := p.tuning.baseInterval + emptyTicks := 0 + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + select { + case <-p.stop: + return + case ev := <-p.events: + batch := []PeerStateChangeEvent{ev} + drain := true + for drain { + select { + case e2 := <-p.events: + batch = append(batch, e2) + default: + drain = false + } + } + p.flushDelta(batch) + interval = p.tuning.baseInterval + emptyTicks = 0 + timer.Reset(interval) + case nonce := <-p.snapshotReq: + if p.source != nil { + p.flushFull(p.source.SnapshotAllRemotePeers(), nonce) + } + interval = p.tuning.baseInterval + emptyTicks = 0 + timer.Reset(interval) + case <-timer.C: + delta := p.computeDeltaFromSource() + if len(delta) > 0 { + p.flushDelta(delta) + interval = p.tuning.baseInterval + emptyTicks = 0 + } else { + emptyTicks++ + if emptyTicks >= p.tuning.doubleAfter && interval < p.tuning.maxInterval { + interval *= 2 + if interval > 
p.tuning.maxInterval { + interval = p.tuning.maxInterval + } + emptyTicks = 0 + } + } + timer.Reset(interval) + } + } +} + +func (p *connStatePusher) flushDelta(events []PeerStateChangeEvent) { + if len(events) == 0 { + return + } + p.mu.Lock() + p.seq++ + seq := p.seq + p.mu.Unlock() + entries := make([]*mgmProto.PeerConnectionEntry, 0, len(events)) + for _, ev := range events { + entries = append(entries, eventToEntry(ev)) + } + if err := p.sink.Push(context.Background(), &mgmProto.PeerConnectionMap{ + Seq: seq, + FullSnapshot: false, + Entries: entries, + }); err != nil { + // Push failed (mgmt reconnect, transient gRPC error, etc.). + // Do NOT mark these events as lastPushed -- on the next tick + // the dirty-state computation will re-include them so the + // management server eventually catches up. Without this, a + // peer that flipped state during a brief mgmt outage would + // stay stale until its next state change or the 60 s heartbeat. + return + } + p.mu.Lock() + for _, ev := range events { + p.lastPushed[ev.Pubkey] = ev + } + p.mu.Unlock() +} + +func (p *connStatePusher) flushFull(events []PeerStateChangeEvent, inResponseToNonce uint64) { + p.mu.Lock() + p.seq++ + seq := p.seq + p.mu.Unlock() + entries := make([]*mgmProto.PeerConnectionEntry, 0, len(events)) + for _, ev := range events { + entries = append(entries, eventToEntry(ev)) + } + if err := p.sink.Push(context.Background(), &mgmProto.PeerConnectionMap{ + Seq: seq, + FullSnapshot: true, + Entries: entries, + InResponseToNonce: inResponseToNonce, + }); err != nil { + // Same dirty-retain semantics as flushDelta. A failed full + // snapshot leaves lastPushed unchanged so the next push (or + // the next snapshot request) will see every peer as dirty. 
+ return + } + p.mu.Lock() + for _, ev := range events { + p.lastPushed[ev.Pubkey] = ev + } + p.mu.Unlock() +} + +func (p *connStatePusher) computeDeltaFromSource() []PeerStateChangeEvent { + if p.source == nil { + return nil + } + all := p.source.SnapshotAllRemotePeers() + p.mu.Lock() + defer p.mu.Unlock() + delta := make([]PeerStateChangeEvent, 0, len(all)) + for _, ev := range all { + prev, had := p.lastPushed[ev.Pubkey] + if !had || isMaterialChange(prev, ev) { + delta = append(delta, ev) + } + } + return delta +} + +// isMaterialChange decides whether ev's delta vs prev should generate a +// push. Always include conn_type/endpoint flips. Latency: include if +// |delta| >= 5 ms OR the handshake is newer (so any peer that's been +// actively talking AT ALL since the last push is reported, even if +// latency is stable). Phase 3.7i (rev 4 — was AND in rev 3, too +// conservative). +func isMaterialChange(prev, cur PeerStateChangeEvent) bool { + if prev.ConnType != cur.ConnType { + return true + } + if prev.Endpoint != cur.Endpoint { + return true + } + const latencyThresholdMS = 5 + d := int32(cur.LatencyMS) - int32(prev.LatencyMS) + if d < 0 { + d = -d + } + if d >= latencyThresholdMS { + return true + } + if cur.LastHandshake.After(prev.LastHandshake) { + return true + } + return false +} + +func eventToEntry(ev PeerStateChangeEvent) *mgmProto.PeerConnectionEntry { + e := &mgmProto.PeerConnectionEntry{ + RemotePubkey: ev.Pubkey, + ConnType: ev.ConnType, + LatencyMs: ev.LatencyMS, + Endpoint: ev.Endpoint, + RelayServer: ev.RelayServer, + RxBytes: ev.RxBytes, + TxBytes: ev.TxBytes, + } + if !ev.LastHandshake.IsZero() { + e.LastHandshake = timestamppb.New(ev.LastHandshake) + } + return e +} diff --git a/client/internal/conn_state_pusher_test.go b/client/internal/conn_state_pusher_test.go new file mode 100644 index 00000000000..674187ed2bd --- /dev/null +++ b/client/internal/conn_state_pusher_test.go @@ -0,0 +1,145 @@ +package internal + +import ( + "context" + 
"sync" + "testing" + "time" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +type stubPushSink struct { + mu sync.Mutex + pushes []*mgmProto.PeerConnectionMap + notif chan struct{} +} + +func newStubSink() *stubPushSink { return &stubPushSink{notif: make(chan struct{}, 16)} } + +func (s *stubPushSink) Push(_ context.Context, m *mgmProto.PeerConnectionMap) error { + s.mu.Lock() + s.pushes = append(s.pushes, m) + s.mu.Unlock() + select { + case s.notif <- struct{}{}: + default: + } + return nil +} + +func (s *stubPushSink) waitForPush(t *testing.T, timeout time.Duration) { + t.Helper() + select { + case <-s.notif: + case <-time.After(timeout): + t.Fatal("timed out waiting for push") + } +} + +func (s *stubPushSink) snapshot() []*mgmProto.PeerConnectionMap { + s.mu.Lock() + defer s.mu.Unlock() + out := make([]*mgmProto.PeerConnectionMap, len(s.pushes)) + copy(out, s.pushes) + return out +} + +type stubPeerStateSource struct { + mu sync.Mutex + snapshot []PeerStateChangeEvent +} + +func (s *stubPeerStateSource) set(es []PeerStateChangeEvent) { + s.mu.Lock() + defer s.mu.Unlock() + s.snapshot = es +} + +func (s *stubPeerStateSource) SnapshotAllRemotePeers() []PeerStateChangeEvent { + s.mu.Lock() + defer s.mu.Unlock() + out := make([]PeerStateChangeEvent, len(s.snapshot)) + copy(out, s.snapshot) + return out +} + +func TestConnStatePusher_StateChangeIsPushedImmediately(t *testing.T) { + sink := newStubSink() + p := newConnStatePusher(sink, nil) + defer p.Stop() + // Engine normally does this after the first NetworkMap is applied; + // in unit tests we trigger immediately so the loop unblocks. 
+ p.TriggerInitialSnapshot() + + p.OnPeerStateChange(PeerStateChangeEvent{ + Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_P2P, + }) + sink.waitForPush(t, 500*time.Millisecond) + + got := sink.snapshot() + if len(got) != 1 { + t.Fatalf("want 1 push, got %d", len(got)) + } + if got[0].GetFullSnapshot() { + t.Error("state-change push must not be full snapshot") + } +} + +func TestConnStatePusher_NoExtraPushesWhenSnapshotUnchanged(t *testing.T) { + sink := newStubSink() + src := &stubPeerStateSource{} + src.set([]PeerStateChangeEvent{{Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_P2P, LatencyMS: 10}}) + p := newConnStatePusherForTest(sink, src, + pusherTuning{baseInterval: 30 * time.Millisecond, maxInterval: 200 * time.Millisecond, doubleAfter: 2}) + defer p.Stop() + p.TriggerInitialSnapshot() + + sink.waitForPush(t, 500*time.Millisecond) + deadline := time.After(200 * time.Millisecond) + for { + select { + case <-deadline: + if got := sink.snapshot(); len(got) != 1 { + t.Fatalf("want exactly 1 push (initial snapshot), got %d", len(got)) + } + return + case <-sink.notif: + t.Fatal("unexpected push (delta should have been empty)") + } + } +} + +func TestConnStatePusher_OnSnapshotRequestSendsFullWithNonceEcho(t *testing.T) { + sink := newStubSink() + src := &stubPeerStateSource{} + src.set([]PeerStateChangeEvent{ + {Pubkey: "peerA", ConnType: mgmProto.ConnType_CONN_TYPE_P2P}, + {Pubkey: "peerB", ConnType: mgmProto.ConnType_CONN_TYPE_RELAYED}, + }) + p := newConnStatePusherForTest(sink, src, + pusherTuning{baseInterval: time.Hour, maxInterval: time.Hour, doubleAfter: 999}) + defer p.Stop() + p.TriggerInitialSnapshot() + sink.waitForPush(t, 500*time.Millisecond) // initial snapshot + sink.mu.Lock() + sink.pushes = nil + sink.mu.Unlock() + + p.OnSnapshotRequest(42) + sink.waitForPush(t, 500*time.Millisecond) + + got := sink.snapshot() + if len(got) != 1 { + t.Fatalf("want 1 push, got %d", len(got)) + } + if !got[0].GetFullSnapshot() { + 
t.Error("snapshot-request push must be full") + } + if got[0].GetInResponseToNonce() != 42 { + t.Errorf("want nonce echo 42, got %d", got[0].GetInResponseToNonce()) + } + if len(got[0].GetEntries()) != 2 { + t.Errorf("want 2 entries, got %d", len(got[0].GetEntries())) + } +} diff --git a/client/internal/connect.go b/client/internal/connect.go index 72e096a80a1..87768208ac1 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -25,6 +25,7 @@ import ( "github.com/netbirdio/netbird/client/internal/listener" "github.com/netbirdio/netbird/client/internal/metrics" "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/statemanager" "github.com/netbirdio/netbird/client/internal/stdnet" @@ -566,6 +567,11 @@ func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConf LazyConnectionEnabled: config.LazyConnectionEnabled, + ConnectionMode: parseConnectionMode(config.ConnectionMode), + RelayTimeoutSeconds: config.RelayTimeoutSeconds, + P2pTimeoutSeconds: config.P2pTimeoutSeconds, + P2pRetryMaxSeconds: config.P2pRetryMaxSeconds, + MTU: selectMTU(config.MTU, peerConfig.Mtu), LogPath: logPath, @@ -695,3 +701,16 @@ func closeConnWithLog(conn *net.UDPConn) { log.Warnf("closing the testing port %d took %s. Usually it is safe to ignore, but continuous warnings may indicate a problem.", conn.LocalAddr().(*net.UDPAddr).Port, time.Since(startClosing)) } } + +// parseConnectionMode is a tolerant wrapper used by the EngineConfig builder. +// An invalid string in the persisted profile (e.g. left over from a +// downgrade-then-upgrade cycle) is logged and treated as Unspecified so the +// daemon falls through to env / server resolution rather than panicking. 
+func parseConnectionMode(s string) connectionmode.Mode { + m, err := connectionmode.ParseString(s) + if err != nil { + log.Warnf("ignoring invalid connection_mode %q in profile config: %v", s, err) + return connectionmode.ModeUnspecified + } + return m +} diff --git a/client/internal/debouncer/debouncer.go b/client/internal/debouncer/debouncer.go new file mode 100644 index 00000000000..004a03d4aac --- /dev/null +++ b/client/internal/debouncer/debouncer.go @@ -0,0 +1,53 @@ +// Package debouncer provides a small "trigger now or coalesce within a +// window" helper. Used by the engine to debounce SyncMeta calls. +package debouncer + +import ( + "sync" + "time" +) + +// Debouncer coalesces rapid successive Trigger calls: only the last fn +// registered within the delay window is executed, after the window +// expires. +type Debouncer struct { + delay time.Duration + mu sync.Mutex + timer *time.Timer + fn func() +} + +// New creates a Debouncer with the given delay window. +func New(delay time.Duration) *Debouncer { + return &Debouncer{delay: delay} +} + +// Trigger schedules fn to run after the configured delay. Subsequent +// Trigger calls within the window REPLACE the pending fn (last-write-wins) +// and reset the timer. +func (d *Debouncer) Trigger(fn func()) { + d.mu.Lock() + defer d.mu.Unlock() + d.fn = fn + if d.timer != nil { + d.timer.Stop() + } + d.timer = time.AfterFunc(d.delay, func() { + d.mu.Lock() + f := d.fn + d.mu.Unlock() + if f != nil { + f() + } + }) +} + +// Stop cancels any pending fn. Safe to call multiple times. 
+func (d *Debouncer) Stop() { + d.mu.Lock() + defer d.mu.Unlock() + if d.timer != nil { + d.timer.Stop() + d.timer = nil + } +} diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 0a12a5326e3..5679a5b97df 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -644,6 +644,12 @@ func (g *BundleGenerator) addCommonConfigFields(configContent *strings.Builder) configContent.WriteString(fmt.Sprintf("LazyConnectionEnabled: %v\n", g.internalConfig.LazyConnectionEnabled)) configContent.WriteString(fmt.Sprintf("MTU: %d\n", g.internalConfig.MTU)) + + // Phase 1+2+3 (#5989) connection-mode resolution + lifecycle timers. + configContent.WriteString(fmt.Sprintf("ConnectionMode: %s\n", g.internalConfig.ConnectionMode)) + configContent.WriteString(fmt.Sprintf("RelayTimeoutSeconds: %d\n", g.internalConfig.RelayTimeoutSeconds)) + configContent.WriteString(fmt.Sprintf("P2pTimeoutSeconds: %d\n", g.internalConfig.P2pTimeoutSeconds)) + configContent.WriteString(fmt.Sprintf("P2pRetryMaxSeconds: %d\n", g.internalConfig.P2pRetryMaxSeconds)) } func (g *BundleGenerator) addProf() (err error) { diff --git a/client/internal/engine.go b/client/internal/engine.go index 7f19e2d2876..256246e5b38 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -34,6 +34,7 @@ import ( nbnetstack "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/acl" + "github.com/netbirdio/netbird/client/internal/debouncer" "github.com/netbirdio/netbird/client/internal/debug" "github.com/netbirdio/netbird/client/internal/dns" dnsconfig "github.com/netbirdio/netbird/client/internal/dns/config" @@ -61,6 +62,7 @@ import ( "github.com/netbirdio/netbird/client/system" nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/connectionmode" mgm 
"github.com/netbirdio/netbird/shared/management/client" "github.com/netbirdio/netbird/shared/management/domain" mgmProto "github.com/netbirdio/netbird/shared/management/proto" @@ -137,6 +139,26 @@ type EngineConfig struct { LazyConnectionEnabled bool + // ConnectionMode is the resolved peer-connection mode for this daemon + // session. ModeUnspecified means "fall back to LazyConnectionEnabled". + // Set by the caller of NewEngine; usually populated from + // profilemanager.Config.ConnectionMode in connect.go. + ConnectionMode connectionmode.Mode + + // RelayTimeoutSeconds, when > 0, overrides the server-pushed relay + // timeout. 0 means "follow server-pushed value". + RelayTimeoutSeconds uint32 + + // P2pTimeoutSeconds, when > 0, overrides the server-pushed p2p timeout. + // 0 means "follow server-pushed value". Reserved for Phase 2 -- has no + // effect in Phase 1. + P2pTimeoutSeconds uint32 + + // P2pRetryMaxSeconds, when > 0, overrides the server-pushed + // p2p_retry_max_seconds. 0 = use server-pushed value (or built-in + // default 15 min). Phase 3 of #5989. + P2pRetryMaxSeconds uint32 + MTU uint16 // for debug bundle generation @@ -246,6 +268,13 @@ type Engine struct { jobExecutorWG sync.WaitGroup exposeManager *expose.Manager + + // Phase 3.7i (#5989): track last-pushed effective config to detect changes. + lastPushedEff mgm.EffectiveConnConfig + syncMetaDebouncer *debouncer.Debouncer + + // Phase 3.7i (#5989): per-peer connection-state pusher. + connStatePusher *connStatePusher } // Peer is an instance of the Connection Peer @@ -288,12 +317,24 @@ func NewEngine( jobExecutor: jobexec.NewExecutor(), clientMetrics: services.ClientMetrics, updateManager: services.UpdateManager, + syncMetaDebouncer: debouncer.New(5 * time.Second), } log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String()) return engine } +// ConnMgr returns the engine's ConnMgr or nil if the engine has not been +// started yet (or has already shut down). 
Used by the Android UI to query +// the server-pushed connection mode for the dropdown's "Follow server" +// label. +func (e *Engine) ConnMgr() *ConnMgr { + if e == nil { + return nil + } + return e.connMgr +} + func (e *Engine) Stop() error { if e == nil { // this seems to be a very odd case but there was the possibility if the netbird down command comes before the engine is fully started @@ -302,10 +343,19 @@ func (e *Engine) Stop() error { } e.syncMsgMux.Lock() + if e.syncMetaDebouncer != nil { + e.syncMetaDebouncer.Stop() + } + if e.connMgr != nil { e.connMgr.Close() } + if e.connStatePusher != nil { + e.connStatePusher.Stop() + e.connStatePusher = nil + } + // stopping network monitor first to avoid starting the engine again if e.networkMonitor != nil { e.networkMonitor.Stop() @@ -574,8 +624,20 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.connMgr = NewConnMgr(e.config, e.statusRecorder, e.peerStore, wgIface) e.connMgr.Start(e.ctx) + // Phase 3.7i (#5989): start the per-peer connection-state pusher. 
+ e.connStatePusher = newConnStatePusher( + &enginePushSink{engine: e}, + &enginePeerStateSource{engine: e}, + ) + e.statusRecorder.SetConnStateListener(func(pubkey string, st peer.State) { + e.connStatePusher.OnPeerStateChange(peerStateToEvent(pubkey, st)) + }) + e.mgmClient.SetSnapshotRequestHandler(func(nonce uint64) { + e.connStatePusher.OnSnapshotRequest(nonce) + }) + e.srWatcher = guard.NewSRWatcher(e.signal, e.relayManager, e.mobileDep.IFaceDiscover, iceCfg) - e.srWatcher.Start(peer.IsForceRelayed()) + e.srWatcher.Start(peer.IsForceRelayed()) //nolint:staticcheck // intentionally retained for Phase-1 backwards compat e.receiveSignalEvents() e.receiveManagementEvents() @@ -1231,8 +1293,49 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { return nil } - if err := e.connMgr.UpdatedRemoteFeatureFlag(e.ctx, networkMap.GetPeerConfig().GetLazyConnectionEnabled()); err != nil { - log.Errorf("failed to update lazy connection feature flag: %v", err) + if err := e.connMgr.UpdatedRemotePeerConfig(e.ctx, networkMap.GetPeerConfig()); err != nil { + log.Errorf("failed to update connection mode from PeerConfig: %v", err) + } + + // Phase 3.7i (#5989): record + push effective values. + newEff := mgm.EffectiveConnConfig{ + Mode: e.connMgr.Mode().String(), + RelayTimeoutSecs: e.connMgr.RelayTimeout(), + P2PTimeoutSecs: e.connMgr.P2pTimeout(), + P2PRetryMaxSecs: e.connMgr.P2pRetryMax(), + } + e.mgmClient.SetEffectiveConnConfig(newEff) + if e.lastPushedEff != newEff { + e.lastPushedEff = newEff + // Debounce SyncMeta so a burst of NetworkMap updates doesn't + // generate a burst of SyncMeta calls. 
+ e.syncMetaDebouncer.Trigger(func() { + info, err := system.GetInfoWithChecks(e.ctx, e.checks) + if err != nil { + log.Warnf("failed to get system info for SyncMeta: %v", err) + info = system.GetInfo(e.ctx) + } + info.SetFlags( + e.config.RosenpassEnabled, + e.config.RosenpassPermissive, + &e.config.ServerSSHAllowed, + e.config.DisableClientRoutes, + e.config.DisableServerRoutes, + e.config.DisableDNS, + e.config.DisableFirewall, + e.config.BlockLANAccess, + e.config.BlockInbound, + e.config.LazyConnectionEnabled, + e.config.EnableSSHRoot, + e.config.EnableSSHSFTP, + e.config.EnableSSHLocalPortForwarding, + e.config.EnableSSHRemotePortForwarding, + e.config.DisableSSHAuth, + ) + if err := e.mgmClient.SyncMeta(info); err != nil { + log.Warnf("SyncMeta after effective-mode change: %v", err) + } + }) } if e.firewall != nil { @@ -1296,6 +1399,27 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { e.updateOfflinePeers(networkMap.GetOfflinePeers()) + // Phase 3.7i (#5989): populate RemoteMeta for offline peers so the + // daemon-RPC StatusResponse can show them with their groups + last_seen. 
+ for _, op := range networkMap.GetOfflinePeers() { + if err := e.statusRecorder.UpdatePeerRemoteMeta(op.GetWgPubKey(), peer.RemoteMeta{ + EffectiveConnectionMode: op.GetEffectiveConnectionMode(), + EffectiveRelayTimeoutSecs: op.GetEffectiveRelayTimeoutSecs(), + EffectiveP2PTimeoutSecs: op.GetEffectiveP2PTimeoutSecs(), + EffectiveP2PRetryMaxSecs: op.GetEffectiveP2PRetryMaxSecs(), + ConfiguredConnectionMode: op.GetConfiguredConnectionMode(), + ConfiguredRelayTimeoutSecs: op.GetConfiguredRelayTimeoutSecs(), + ConfiguredP2PTimeoutSecs: op.GetConfiguredP2PTimeoutSecs(), + ConfiguredP2PRetryMaxSecs: op.GetConfiguredP2PRetryMaxSecs(), + Groups: op.GetGroups(), + LastSeenAtServer: peer.TimestampOrZero(op.GetLastSeenAtServer()), + LiveOnline: op.GetLiveOnline(), + ServerLivenessKnown: op.GetServerLivenessKnown(), + }); err != nil { + log.Debugf("UpdatePeerRemoteMeta(offline %s): %v", op.GetWgPubKey(), err) + } + } + // Filter out own peer from the remote peers list localPubKey := e.config.WgPrivateKey.PublicKey().String() remotePeers := make([]*mgmProto.RemotePeerConfig, 0, len(networkMap.GetRemotePeers())) @@ -1309,6 +1433,9 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { if networkMap.GetRemotePeersIsEmpty() { err := e.removeAllPeers() e.statusRecorder.FinishPeerListModifications() + if e.connStatePusher != nil { + e.connStatePusher.TriggerInitialSnapshot() + } if err != nil { return err } @@ -1329,6 +1456,12 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { } e.statusRecorder.FinishPeerListModifications() + // Phase 3.7i: peers are populated for the first time; release + // the conn-state pusher so its initial full snapshot reflects + // the actual peer set instead of an empty map. 
+ if e.connStatePusher != nil { + e.connStatePusher.TriggerInitialSnapshot() + } e.updatePeerSSHHostKeys(remotePeers) @@ -1337,6 +1470,27 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { } e.updateSSHServerAuth(networkMap.GetSshAuth()) + + // Phase 3.7i (#5989): mirror RemotePeerConfig fields into peer.Status + // so daemon-RPC StatusResponse exposes them for UIs. + for _, rp := range remotePeers { + if err := e.statusRecorder.UpdatePeerRemoteMeta(rp.GetWgPubKey(), peer.RemoteMeta{ + EffectiveConnectionMode: rp.GetEffectiveConnectionMode(), + EffectiveRelayTimeoutSecs: rp.GetEffectiveRelayTimeoutSecs(), + EffectiveP2PTimeoutSecs: rp.GetEffectiveP2PTimeoutSecs(), + EffectiveP2PRetryMaxSecs: rp.GetEffectiveP2PRetryMaxSecs(), + ConfiguredConnectionMode: rp.GetConfiguredConnectionMode(), + ConfiguredRelayTimeoutSecs: rp.GetConfiguredRelayTimeoutSecs(), + ConfiguredP2PTimeoutSecs: rp.GetConfiguredP2PTimeoutSecs(), + ConfiguredP2PRetryMaxSecs: rp.GetConfiguredP2PRetryMaxSecs(), + Groups: rp.GetGroups(), + LastSeenAtServer: peer.TimestampOrZero(rp.GetLastSeenAtServer()), + LiveOnline: rp.GetLiveOnline(), + ServerLivenessKnown: rp.GetServerLivenessKnown(), + }); err != nil { + log.Debugf("UpdatePeerRemoteMeta(%s): %v", rp.GetWgPubKey(), err) + } + } } // must set the exclude list after the peers are added. 
Without it the manager can not figure out the peers parameters from the store @@ -1560,7 +1714,9 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV Addr: e.getRosenpassAddr(), PermissiveMode: e.config.RosenpassPermissive, }, - ICEConfig: e.createICEConfig(), + ICEConfig: e.createICEConfig(), + Mode: e.connMgr.Mode(), + P2pRetryMaxSeconds: e.connMgr.P2pRetryMax(), } serviceDependencies := peer.ServiceDependencies{ diff --git a/client/internal/engine_pusher_adapters.go b/client/internal/engine_pusher_adapters.go new file mode 100644 index 00000000000..bbca5397ee1 --- /dev/null +++ b/client/internal/engine_pusher_adapters.go @@ -0,0 +1,62 @@ +package internal + +import ( + "context" + + "github.com/netbirdio/netbird/client/internal/peer" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// enginePushSink bridges the Engine's mgmClient to the PushSink interface +// consumed by connStatePusher. Phase 3.7i of #5989. +type enginePushSink struct{ engine *Engine } + +func (s *enginePushSink) Push(ctx context.Context, m *mgmProto.PeerConnectionMap) error { + return s.engine.mgmClient.SyncPeerConnections(ctx, m) +} + +// enginePeerStateSource bridges the Engine's statusRecorder to the +// PeerStateSource interface consumed by connStatePusher. Phase 3.7i of #5989. +type enginePeerStateSource struct{ engine *Engine } + +func (s *enginePeerStateSource) SnapshotAllRemotePeers() []PeerStateChangeEvent { + fs := s.engine.statusRecorder.GetFullStatus() + out := make([]PeerStateChangeEvent, 0, len(fs.Peers)) + for _, st := range fs.Peers { + out = append(out, peerStateToEvent(st.PubKey, st)) + } + return out +} + +// peerStateToEvent converts a peer.State to a PeerStateChangeEvent suitable +// for the connStatePusher. The Endpoint field is set to +// "local ↔ remote" when both ICE candidate endpoints are known. 
+func peerStateToEvent(pubkey string, st peer.State) PeerStateChangeEvent { + var ct mgmProto.ConnType + switch { + case st.ConnStatus == peer.StatusConnected && !st.Relayed: + ct = mgmProto.ConnType_CONN_TYPE_P2P + case st.ConnStatus == peer.StatusConnected && st.Relayed: + ct = mgmProto.ConnType_CONN_TYPE_RELAYED + case st.ConnStatus == peer.StatusConnecting: + ct = mgmProto.ConnType_CONN_TYPE_CONNECTING + default: + ct = mgmProto.ConnType_CONN_TYPE_IDLE + } + + endpoint := st.LocalIceCandidateEndpoint + if endpoint != "" && st.RemoteIceCandidateEndpoint != "" { + endpoint = st.LocalIceCandidateEndpoint + " <-> " + st.RemoteIceCandidateEndpoint + } + + return PeerStateChangeEvent{ + Pubkey: pubkey, + ConnType: ct, + LastHandshake: st.LastWireguardHandshake, + LatencyMS: uint32(st.Latency.Milliseconds()), + Endpoint: endpoint, + RelayServer: st.RelayServerAddress, + RxBytes: uint64(st.BytesRx), + TxBytes: uint64(st.BytesTx), + } +} diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index f4c5be70a52..98730bc4aba 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -1671,7 +1671,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil, nil, nil) if err != nil { return nil, "", err } diff --git a/client/internal/lazyconn/env.go b/client/internal/lazyconn/env.go index 649d1cd65de..cfdcc67d61d 100644 --- a/client/internal/lazyconn/env.go +++ b/client/internal/lazyconn/env.go @@ -12,6 +12,11 @@ const ( EnvInactivityThreshold = "NB_LAZY_CONN_INACTIVITY_THRESHOLD" ) +// 
IsLazyConnEnabledByEnv reads NB_ENABLE_EXPERIMENTAL_LAZY_CONN. +// +// Deprecated: use peer.ResolveModeFromEnv() -- kept here to not break +// existing callers in conn_mgr.go during the Phase-1 refactor; will be +// removed once all call sites use the new resolver. func IsLazyConnEnabledByEnv() bool { val := os.Getenv(EnvEnableLazyConn) if val == "" { diff --git a/client/internal/lazyconn/inactivity/manager.go b/client/internal/lazyconn/inactivity/manager.go index 0120f443049..3758eb0869b 100644 --- a/client/internal/lazyconn/inactivity/manager.go +++ b/client/internal/lazyconn/inactivity/manager.go @@ -14,6 +14,14 @@ import ( const ( checkInterval = 1 * time.Minute + // DefaultInactivityThreshold is the relay-tunnel idle-teardown + // fallback when neither client config nor server-pushed value sets + // it. Reverted 2026-05-07 to the original 15 min value (was bumped + // to 24 h locally during 2026-05-03 testing, but that change is not + // in scope for this PR — keeping existing p2p-lazy semantics intact + // is required so p2p-dynamic is the only mode whose lifecycle + // changes. p2p-dynamic users that want a longer warm window can + // override via per-peer relay_timeout_seconds. DefaultInactivityThreshold = 15 * time.Minute MinimumInactivityThreshold = 1 * time.Minute ) @@ -22,30 +30,89 @@ type WgInterface interface { LastActivities() map[string]monotime.Time } +// Manager watches per-peer activity timestamps from the WireGuard +// interface and notifies via channels when peers cross inactivity +// thresholds. +// +// Phase 2 (#5989) introduced TWO independent thresholds per peer: +// - iceTimeout fires the iceInactiveChan (consumer detaches the ICE +// worker but keeps the relay-tunnel up). +// - relayTimeout fires the relayInactiveChan (consumer tears down +// the whole connection). +// +// Threshold == 0 disables that channel for all peers (the corresponding +// teardown never fires). 
Phase-1 p2p-lazy is expressed as +// iceTimeout=0 + relayTimeout=X; the legacy InactivePeersChan is the +// same as RelayInactiveChan for backwards compat. type Manager struct { - inactivePeersChan chan map[string]struct{} + iface WgInterface - iface WgInterface - interestedPeers map[string]*lazyconn.PeerConfig + // Two-timer thresholds (Phase 2). Both 0 = manager is effectively + // inert (peers register but no channel ever fires). + iceTimeout time.Duration + relayTimeout time.Duration + + interestedPeers map[string]*lazyconn.PeerConfig + + iceInactiveChan chan map[string]struct{} + relayInactiveChan chan map[string]struct{} + + // inactivityThreshold + inactivePeersChan are kept for the + // Phase-1 NewManager API. Internally they alias to the relay + // timeout / channel. inactivityThreshold time.Duration + inactivePeersChan chan map[string]struct{} } +// NewManager is the Phase-1 single-timer constructor. Pass a *time.Duration +// to override the default DefaultInactivityThreshold; nil uses the default. +// +// Deprecated: use NewManagerWithTwoTimers. NewManager remains the entry +// point for callers that haven't been migrated; it constructs a manager +// with iceTimeout=0 (= ICE always-on, p2p-lazy semantics). func NewManager(iface WgInterface, configuredThreshold *time.Duration) *Manager { - inactivityThreshold, err := validateInactivityThreshold(configuredThreshold) + threshold, err := validateInactivityThreshold(configuredThreshold) if err != nil { - inactivityThreshold = DefaultInactivityThreshold + threshold = DefaultInactivityThreshold log.Warnf("invalid inactivity threshold configured: %v, using default: %v", err, DefaultInactivityThreshold) } - log.Infof("inactivity threshold configured: %v", inactivityThreshold) + log.Infof("inactivity threshold configured: %v", threshold) + return newManager(iface, 0, threshold) +} + +// NewManagerWithTwoTimers is the Phase-2 constructor. Pass 0 for either +// timeout to disable that teardown path. 
Both 0 leaves the manager +// running but inert (no channel ever fires) -- used by p2p / relay-forced +// modes that don't tear down workers. +func NewManagerWithTwoTimers(iface WgInterface, iceTimeout, relayTimeout time.Duration) *Manager { + if iceTimeout > 0 { + log.Infof("ICE inactivity timeout: %v", iceTimeout) + } + if relayTimeout > 0 { + log.Infof("relay inactivity timeout: %v", relayTimeout) + } + return newManager(iface, iceTimeout, relayTimeout) +} + +func newManager(iface WgInterface, iceTimeout, relayTimeout time.Duration) *Manager { + relayCh := make(chan map[string]struct{}, 1) return &Manager{ - inactivePeersChan: make(chan map[string]struct{}, 1), iface: iface, + iceTimeout: iceTimeout, + relayTimeout: relayTimeout, interestedPeers: make(map[string]*lazyconn.PeerConfig), - inactivityThreshold: inactivityThreshold, + iceInactiveChan: make(chan map[string]struct{}, 1), + relayInactiveChan: relayCh, + inactivityThreshold: relayTimeout, + inactivePeersChan: relayCh, // Phase-1 alias: same channel as relayInactiveChan } } +// InactivePeersChan is the Phase-1 channel for whole-tunnel teardown. +// In the Phase-2 internal model this is the same channel as +// RelayInactiveChan -- existing callers (engine.go p2p-lazy path) keep +// working unchanged. func (m *Manager) InactivePeersChan() chan map[string]struct{} { if m == nil { // return a nil channel that blocks forever @@ -55,6 +122,26 @@ func (m *Manager) InactivePeersChan() chan map[string]struct{} { return m.inactivePeersChan } +// ICEInactiveChan returns the channel that signals ICE-worker-only +// inactivity per peer (consumer typically calls Conn.DetachICE). +// Always returns a valid channel; if iceTimeout is 0, the channel +// just never fires. +func (m *Manager) ICEInactiveChan() chan map[string]struct{} { + if m == nil { + return nil + } + return m.iceInactiveChan +} + +// RelayInactiveChan returns the channel that signals relay-worker +// (and thus whole-tunnel) inactivity per peer. 
+func (m *Manager) RelayInactiveChan() chan map[string]struct{} { + if m == nil { + return nil + } + return m.relayInactiveChan +} + func (m *Manager) AddPeer(peerCfg *lazyconn.PeerConfig) { if m == nil { return @@ -95,24 +182,25 @@ func (m *Manager) Start(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C(): - idlePeers, err := m.checkStats() + iceIdle, relayIdle, err := m.checkStats() if err != nil { log.Errorf("error checking stats: %v", err) return } - if len(idlePeers) == 0 { - continue + if len(iceIdle) > 0 { + m.notifyChan(ctx, m.iceInactiveChan, iceIdle) + } + if len(relayIdle) > 0 { + m.notifyChan(ctx, m.relayInactiveChan, relayIdle) } - - m.notifyInactivePeers(ctx, idlePeers) } } } -func (m *Manager) notifyInactivePeers(ctx context.Context, inactivePeers map[string]struct{}) { +func (m *Manager) notifyChan(ctx context.Context, ch chan map[string]struct{}, peers map[string]struct{}) { select { - case m.inactivePeersChan <- inactivePeers: + case ch <- peers: case <-ctx.Done(): return default: @@ -120,10 +208,24 @@ func (m *Manager) notifyInactivePeers(ctx context.Context, inactivePeers map[str } } -func (m *Manager) checkStats() (map[string]struct{}, error) { +// checkStats walks the per-peer activity-since values and groups peers +// into two sets: +// - iceIdle: peers idle longer than iceTimeout (only populated when +// iceTimeout > 0; otherwise this set is always empty) +// - relayIdle: peers idle longer than relayTimeout (only populated +// when relayTimeout > 0) +// +// Both sets are returned independently so consumers can act on each +// without coupling. A peer that has crossed both thresholds appears in +// both sets and the consumer is expected to handle them in order +// (first DetachICE on the iceIdle set, then full Close on the relayIdle +// set; the order is fine because Close on a peer where ICE is already +// detached is still correct). 
+func (m *Manager) checkStats() (iceIdle, relayIdle map[string]struct{}, err error) {
 	lastActivities := m.iface.LastActivities()
-	idlePeers := make(map[string]struct{})
+	iceIdle = make(map[string]struct{})
+	relayIdle = make(map[string]struct{})
 
 	checkTime := time.Now()
 	for peerID, peerCfg := range m.interestedPeers {
@@ -135,13 +237,18 @@ func (m *Manager) checkStats() (map[string]struct{}, error) {
 		}
 
 		since := monotime.Since(lastActive)
-		if since > m.inactivityThreshold {
-			peerCfg.Log.Infof("peer is inactive since time: %s", checkTime.Add(-since).String())
-			idlePeers[peerID] = struct{}{}
+
+		if m.iceTimeout > 0 && since > m.iceTimeout {
+			peerCfg.Log.Debugf("peer ICE idle since: %s", checkTime.Add(-since).String())
+			iceIdle[peerID] = struct{}{}
+		}
+		if m.relayTimeout > 0 && since > m.relayTimeout {
+			peerCfg.Log.Infof("peer relay idle since: %s", checkTime.Add(-since).String())
+			relayIdle[peerID] = struct{}{}
 		}
 	}
 
-	return idlePeers, nil
+	return iceIdle, relayIdle, nil
 }
 
 func validateInactivityThreshold(configuredThreshold *time.Duration) (time.Duration, error) {
diff --git a/client/internal/lazyconn/inactivity/manager_test.go b/client/internal/lazyconn/inactivity/manager_test.go
index 10b4ef1ebb4..db3e648867c 100644
--- a/client/internal/lazyconn/inactivity/manager_test.go
+++ b/client/internal/lazyconn/inactivity/manager_test.go
@@ -23,9 +23,11 @@ func (m *mockWgInterface) LastActivities() map[string]monotime.Time {
 
 func TestPeerTriggersInactivity(t *testing.T) {
 	peerID := "peer1"
+	// Past activity must exceed DefaultInactivityThreshold (15 min — the
+	// temporary 24 h bump was reverted) — pick 25 h for ample margin.
wgMock := &mockWgInterface{ lastActivities: map[string]monotime.Time{ - peerID: monotime.Time(int64(monotime.Now()) - int64(20*time.Minute)), + peerID: monotime.Time(int64(monotime.Now()) - int64(25*time.Hour)), }, } @@ -112,3 +114,263 @@ func (f *fakeTickerMock) C() <-chan time.Time { } func (f *fakeTickerMock) Stop() {} + +// --- Phase 2 (#5989) two-timer tests --- + +// makePeerCfg is a test helper for building a minimal PeerConfig with logger. +func makePeerCfg(peerID string) *lazyconn.PeerConfig { + return &lazyconn.PeerConfig{ + PublicKey: peerID, + Log: log.WithField("peer", peerID), + } +} + +// pastActivity returns a monotime.Time corresponding to (now - d). +func pastActivity(d time.Duration) monotime.Time { + return monotime.Time(int64(monotime.Now()) - int64(d)) +} + +func TestTwoTimers_OnlyICEFires(t *testing.T) { + peerID := "peer1" + + // Peer idle for 6 minutes: above iceTimeout (5m), below relayTimeout (24h). + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(6 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 24*time.Hour) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.ICEInactiveChan(): + assert.Contains(t, peers, peerID, "expected peerID on ICE channel") + case <-time.After(1 * time.Second): + t.Fatal("expected ICE-inactive event, none received") + } + + // Relay channel must NOT fire. 
+ select { + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel should not fire when only iceTimeout exceeded") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_BothFire(t *testing.T) { + peerID := "peer1" + + // Peer idle for 25h: above both iceTimeout (5m) and relayTimeout (24h). + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(25 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 24*time.Hour) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + gotICE := false + gotRelay := false + deadline := time.After(1 * time.Second) + for !gotICE || !gotRelay { + select { + case peers := <-manager.ICEInactiveChan(): + if _, ok := peers[peerID]; ok { + gotICE = true + } + case peers := <-manager.RelayInactiveChan(): + if _, ok := peers[peerID]; ok { + gotRelay = true + } + case <-deadline: + t.Fatalf("timeout waiting for both channels (gotICE=%v, gotRelay=%v)", gotICE, gotRelay) + } + } +} + +func TestTwoTimers_ICEDisabled(t *testing.T) { + peerID := "peer1" + + // iceTimeout=0 (disabled) + relayTimeout=10m, peer idle 11m -> only relay fires. 
+ wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(11 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 0, 10*time.Minute) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.RelayInactiveChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("relay channel should fire when relayTimeout exceeded") + } + + // ICE channel must never fire because iceTimeout=0. + select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel should NEVER fire when iceTimeout=0") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_RelayDisabled(t *testing.T) { + peerID := "peer1" + + // iceTimeout=5m + relayTimeout=0, peer idle 6m -> only ICE fires. + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(6 * time.Minute), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 5*time.Minute, 0) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + select { + case peers := <-manager.ICEInactiveChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("ICE channel should fire when iceTimeout exceeded") + } + + // Relay channel must never fire because relayTimeout=0. 
+ select { + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel should NEVER fire when relayTimeout=0") + case <-time.After(200 * time.Millisecond): + // expected + } +} + +func TestTwoTimers_BothDisabled(t *testing.T) { + peerID := "peer1" + + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(99 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + manager := NewManagerWithTwoTimers(wgMock, 0, 0) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + // Neither channel should fire. + select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel must not fire when both disabled") + case <-manager.RelayInactiveChan(): + t.Fatal("Relay channel must not fire when both disabled") + case <-time.After(300 * time.Millisecond): + // expected + } +} + +// TestPhase1_LazyEquivalence verifies that the legacy NewManager constructor +// behaves identically to the Phase-1 single-timer code: peers cross the +// (single) inactivityThreshold and appear on InactivePeersChan, ICE +// channel never fires. +func TestPhase1_LazyEquivalence(t *testing.T) { + peerID := "peer1" + + // DefaultInactivityThreshold is 24 h (Phase-3.7i tuning); use 25 h + // of past activity so the test is robust to that constant changing + // in either direction. + wgMock := &mockWgInterface{ + lastActivities: map[string]monotime.Time{ + peerID: pastActivity(25 * time.Hour), + }, + } + + fakeTick := make(chan time.Time, 1) + newTicker = func(d time.Duration) Ticker { + return &fakeTickerMock{CChan: fakeTick} + } + + // Phase-1 entry point with default threshold. 
+ manager := NewManager(wgMock, nil) + manager.AddPeer(makePeerCfg(peerID)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go manager.Start(ctx) + + fakeTick <- time.Now() + + // InactivePeersChan (Phase-1 alias of RelayInactiveChan) must fire. + select { + case peers := <-manager.InactivePeersChan(): + assert.Contains(t, peers, peerID) + case <-time.After(1 * time.Second): + t.Fatal("Phase-1 InactivePeersChan must fire (= RelayInactiveChan in Phase 2)") + } + + // ICE channel must NEVER fire from Phase-1 entry point (iceTimeout=0). + select { + case <-manager.ICEInactiveChan(): + t.Fatal("ICE channel must not fire in Phase-1 NewManager mode") + case <-time.After(200 * time.Millisecond): + // expected + } +} diff --git a/client/internal/lazyconn/manager/manager.go b/client/internal/lazyconn/manager/manager.go index fc47bda39d5..c1c4be003d8 100644 --- a/client/internal/lazyconn/manager/manager.go +++ b/client/internal/lazyconn/manager/manager.go @@ -28,7 +28,31 @@ type managedPeer struct { } type Config struct { + // Phase-1 single-timer field. Deprecated: use ICEInactivityThreshold + // and RelayInactivityThreshold instead. Kept so existing callers + // (engine.go) compile during the Phase-2 transition; internally + // treated as RelayInactivityThreshold when the new fields are zero. InactivityThreshold *time.Duration + + // ICEInactivityThreshold is the per-peer ICE-worker idle timeout + // (Phase 2 / #5989). 0 = ICE always-on (= p2p-lazy semantics, where + // the whole tunnel goes idle but ICE is never torn down separately). + ICEInactivityThreshold time.Duration + + // RelayInactivityThreshold is the per-peer relay-worker idle timeout + // (Phase 2). 0 = relay always-on. + RelayInactivityThreshold time.Duration +} + +// resolvedTimeouts returns the effective (ICE, Relay) timeouts. If only +// the deprecated InactivityThreshold field is set, it maps onto the +// relay timeout for Phase-1 p2p-lazy semantics. 
+func (c Config) resolvedTimeouts() (iceTimeout, relayTimeout time.Duration) { + relay := c.RelayInactivityThreshold + if relay == 0 && c.InactivityThreshold != nil { + relay = *c.InactivityThreshold + } + return c.ICEInactivityThreshold, relay } // Manager manages lazy connections @@ -76,7 +100,13 @@ func NewManager(config Config, engineCtx context.Context, peerStore *peerstore.S } if wgIface.IsUserspaceBind() { - m.inactivityManager = inactivity.NewManager(wgIface, config.InactivityThreshold) + iceTO, relayTO := config.resolvedTimeouts() + if iceTO == 0 && relayTO == 0 { + // Phase 1 / single-timer fallback when caller hasn't migrated. + m.inactivityManager = inactivity.NewManager(wgIface, config.InactivityThreshold) //nolint:staticcheck // intentional Phase-1 single-timer fallback + } else { + m.inactivityManager = inactivity.NewManagerWithTwoTimers(wgIface, iceTO, relayTO) + } } else { log.Warnf("inactivity manager not supported for kernel mode, wait for remote peer to close the connection") } @@ -84,6 +114,18 @@ func NewManager(config Config, engineCtx context.Context, peerStore *peerstore.S return m } +// InactivityManager exposes the underlying inactivity.Manager so the +// engine / conn_mgr can subscribe to ICEInactiveChan / RelayInactiveChan +// in the p2p-dynamic mode lifecycle. Returns nil if the manager runs in +// kernel-bind mode (no inactivity tracking) or if the manager itself is +// nil (defensive). 
+func (m *Manager) InactivityManager() *inactivity.Manager { + if m == nil { + return nil + } + return m.inactivityManager +} + // UpdateRouteHAMap updates the HA group mappings for routes // This should be called when route configuration changes func (m *Manager) UpdateRouteHAMap(haMap route.HAMap) { diff --git a/client/internal/lazyconn/support.go b/client/internal/lazyconn/support.go index 5e765c2d6f4..1f9927d3864 100644 --- a/client/internal/lazyconn/support.go +++ b/client/internal/lazyconn/support.go @@ -15,6 +15,19 @@ func IsSupported(agentVersion string) bool { return true } + // Custom dev/CI builds with explicit prefix or embedded marker: + // "dev-089a95a", "ci-abcdef" (bare prefix form) + // "0.0.0-dev-1b923aad9", "0.0.0-ci-…" (semver-padded form used by + // build-android-lib.sh so + // version.NewVersion can parse) + // All come from the same source tree as the "development" build + // above; assume they support lazy. Only the random short-hash form + // (e.g. "a6c5960") lacks any prefix signal. 
+ if strings.HasPrefix(agentVersion, "dev-") || strings.HasPrefix(agentVersion, "ci-") || + strings.Contains(agentVersion, "-dev-") || strings.Contains(agentVersion, "-ci-") { + return true + } + // filter out versions like this: a6c5960, a7d5c522, d47be154 if !strings.Contains(agentVersion, ".") { return false diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 1e416bfe707..69dff602ac8 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/client/iface/configurer" "github.com/netbirdio/netbird/client/iface/wgproxy" "github.com/netbirdio/netbird/client/internal/metrics" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/internal/peer/conntype" "github.com/netbirdio/netbird/client/internal/peer/dispatcher" "github.com/netbirdio/netbird/client/internal/peer/guard" @@ -86,11 +87,24 @@ type ConnConfig struct { // ICEConfig ICE protocol configuration ICEConfig icemaker.Config + + // Mode is the resolved connection mode for this peer (forwarded + // from the engine, which got it from the conn_mgr precedence chain). + // Phase 1 uses it to pick the skip-ICE branch when ModeRelayForced. + Mode connectionmode.Mode + + // P2pRetryMaxSeconds is the cap for the ICE-failure backoff schedule + // in p2p-dynamic mode. 0 = use built-in default (DefaultP2PRetryMax). + // Wire-format sentinel uint32-max (= ^uint32(0)) means "user-explicit + // disable", which the resolver translates to time.Duration(0) at + // engine.go before passing it here. Phase 3 of #5989. 
+ P2pRetryMaxSeconds uint32 } type Conn struct { Log *log.Entry mu sync.Mutex + iceBackoff *iceBackoffState ctx context.Context ctxCancel context.CancelFunc config ConnConfig @@ -185,8 +199,24 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager) - forceRelay := IsForceRelayed() - if !forceRelay { + // Phase 3: initialize per-peer ICE-failure backoff. The cap comes + // from the resolved P2pRetryMaxSeconds. 0 means "use built-in default". + backoffCap := time.Duration(conn.config.P2pRetryMaxSeconds) * time.Second + if backoffCap == 0 { + backoffCap = DefaultP2PRetryMax + } + if conn.iceBackoff == nil { + conn.iceBackoff = newIceBackoff(backoffCap) + } else { + conn.iceBackoff.SetMaxBackoff(backoffCap) + } + + // Mode-driven branching. ModeRelayForced skips ICE entirely; all + // other modes (P2P, P2PLazy, P2PDynamic) construct workerICE + // eagerly in Phase 1. Phase 2 will branch P2PDynamic separately + // to defer the OnNewOffer registration. + skipICE := conn.config.Mode == connectionmode.ModeRelayForced + if !skipICE { relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) if err != nil { @@ -198,11 +228,25 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay, conn.metricsStages) conn.handshaker.AddRelayListener(conn.workerRelay.OnNewOffer) - if !forceRelay { + + // ICE-listener registration depends on mode: + // - ModeRelayForced: skipICE=true, no workerICE, no listener. + // - ModeP2P, ModeP2PLazy: workerICE constructed, listener registered eagerly. + // P2PLazy's whole-tunnel deferral happens at the conn_mgr level, not here. 
+ // - ModeP2PDynamic: workerICE constructed eagerly so it's ready, but the + // listener registration is deferred. The inactivity manager calls + // Conn.AttachICE() once activity is observed on the relay tunnel. + deferICEListener := conn.config.Mode == connectionmode.ModeP2PDynamic + if !skipICE && !deferICEListener { conn.handshaker.AddICEListener(conn.workerICE.OnNewOffer) } conn.guard = guard.NewGuard(conn.Log, conn.isConnectedOnAllWay, conn.config.Timeout, conn.srWatcher) + // Phase 3.5 (#5989): reset ICE backoff + recreate workerICE on network change. + // Set before Start() is called so the goroutine sees it without races. + if !skipICE { + conn.guard.SetOnNetworkChange(conn.onNetworkChange) + } conn.wg.Add(1) go func() { @@ -398,10 +442,11 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn ep = directEp } - if conn.wgProxyRelay != nil { - conn.wgProxyRelay.Pause() - } - + // Bring the new ICE proxy up FIRST so the destination is ready to + // receive packets. Then update WG to use it. Only after WG has + // committed to the new endpoint do we pause the relay -- otherwise + // there is a 1-2 s window where relay is suspended but WG still + // points at it, dropping every packet in that window. if wgProxy != nil { wgProxy.Work() } @@ -420,6 +465,10 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn if conn.wgProxyRelay != nil { conn.Log.Debugf("redirect packets from relayed conn to WireGuard") conn.wgProxyRelay.RedirectAs(ep) + // Pause AFTER the redirect is wired up so any in-flight packet + // from the relay end has a forwarding path while WG converges + // onto the direct endpoint. + conn.wgProxyRelay.Pause() } conn.currentConnPriority = priority @@ -464,9 +513,14 @@ func (conn *Conn) onICEStateDisconnected(sessionChanged bool) { } else { conn.Log.Infof("ICE disconnected, do not switch to Relay. 
Reset priority to: %s", conntype.None.String()) conn.currentConnPriority = conntype.None - if err := conn.config.WgConfig.WgInterface.RemoveEndpointAddress(conn.config.WgConfig.RemoteKey); err != nil { - conn.Log.Errorf("failed to remove wg endpoint: %v", err) - } + // Intentionally NOT calling RemoveEndpointAddress here: a brief + // ICE flap (NAT rebind, signal hiccup) is followed within 1-2 s + // by a fresh ICE-connected callback that re-configures the WG + // endpoint. Actively removing the endpoint creates a no-endpoint + // window in which WG drops every packet rather than queuing on + // a slightly-stale address that the next ConfigureWGEndpoint + // will replace anyway. If the disconnect is permanent, WG's own + // keepalive timeout will surface the dead peer. } changed := conn.statusICE.Get() != worker.StatusDisconnected @@ -740,7 +794,7 @@ func (conn *Conn) isConnectedOnAllWay() (status guard.ConnStatus) { } return evalConnStatus(connStatusInputs{ - forceRelay: IsForceRelayed(), + forceRelay: conn.config.Mode == connectionmode.ModeRelayForced, peerUsesRelay: conn.workerRelay.IsRelayConnectionSupportedWithPeer(), relayConnected: conn.statusRelay.Get() == worker.StatusConnected, remoteSupportsICE: conn.handshaker.RemoteICESupported(), @@ -975,3 +1029,251 @@ func boolToConnStatus(connected bool) guard.ConnStatus { } return guard.ConnStatusDisconnected } + +// AttachICE registers the ICE-offer listener on the handshaker after the +// activity-detector observes traffic on the relay tunnel. Idempotent: if +// the listener is already attached, it is a no-op. Triggers a fresh offer +// so the remote side learns we are now ICE-capable. +// +// Used by p2p-dynamic mode: workerICE is created in Open() but the +// handshaker dispatch is deferred until traffic activity is seen. 
+func (conn *Conn) AttachICE() error { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.iceBackoff != nil && conn.iceBackoff.IsSuspended() { + snap := conn.iceBackoff.Snapshot() + conn.Log.Debugf("ICE backoff active (failure #%d, retry at %s), staying on relay", + snap.Failures, + snap.NextRetry.Format("15:04:05")) + return nil + } + if conn.handshaker == nil { + return fmt.Errorf("AttachICE: handshaker not initialized (Open not called)") + } + if conn.workerICE == nil { + return fmt.Errorf("AttachICE: workerICE is nil (relay-forced mode)") + } + + if !conn.attachICEListenerLocked() { + return nil + } + + if err := conn.handshaker.SendOffer(); err != nil { + conn.Log.Warnf("AttachICE: SendOffer failed: %v", err) + } + return nil +} + +// attachICEListenerLocked attaches the ICE listener to the handshaker if it +// is not already attached. Returns true when a new attachment was made, +// false when the call was a no-op (already attached, ICE backoff suspended, +// handshaker not initialised, or workerICE not present). +// +// Caller MUST hold conn.mu. Used by: +// - AttachICE (signal-trigger path), which then issues SendOffer. +// - onNetworkChange (Phase 3.7e, #5989), which deliberately does NOT call +// SendOffer because the Guard reconnect-loop handles that. +// +// Honours iceBackoff.IsSuspended() so the failure-backoff is not bypassed. 
+func (conn *Conn) attachICEListenerLocked() bool { + if conn.iceBackoff != nil && conn.iceBackoff.IsSuspended() { + snap := conn.iceBackoff.Snapshot() + conn.Log.Debugf("ICE backoff active (failure #%d, retry at %s), staying on relay", + snap.Failures, + snap.NextRetry.Format("15:04:05")) + return false + } + if conn.handshaker == nil || conn.workerICE == nil { + return false + } + if conn.handshaker.readICEListener() != nil { + return false + } + + conn.handshaker.AddICEListener(conn.workerICE.OnNewOffer) + conn.Log.Debugf("ICE listener attached (locked path)") + return true +} + +// DetachICE removes the ICE-offer listener and tears down the ICE worker. +// Idempotent: if no listener is attached, it is a no-op. Used by +// p2p-dynamic mode when the inactivity manager fires the iceTimeout but +// the relay tunnel should stay up. +func (conn *Conn) DetachICE() error { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.handshaker == nil { + return nil + } + if conn.handshaker.readICEListener() == nil { + return nil + } + + conn.handshaker.RemoveICEListener() + if conn.workerICE != nil { + conn.workerICE.Close() + } + conn.Log.Debugf("ICE listener detached (p2p-dynamic teardown)") + return nil +} + +// onICEFailed is invoked when pion's ICE agent reports +// ConnectionStateFailed. Increments the backoff counter and tears +// down the ICE worker. Phase 3 of #5989. +func (conn *Conn) onICEFailed() { + if conn.iceBackoff == nil { + return + } + delay := conn.iceBackoff.markFailure() + snap := conn.iceBackoff.Snapshot() + if delay > 0 { + conn.Log.Infof("ICE failure #%d, suspending for %s, next retry at %s", + snap.Failures, + delay.Round(time.Second), + snap.NextRetry.Format("15:04:05")) + } + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, snap) + } + // Tear down ICE. Idempotent. Conn stays on relay. 
+ if err := conn.DetachICE(); err != nil { + conn.Log.Warnf("DetachICE after onICEFailed: %v", err) + } +} + +// onICEConnected is invoked when pion's ICE agent reports +// ConnectionStateConnected. Resets the backoff. Phase 3 of #5989. +func (conn *Conn) onICEConnected() { + if conn.iceBackoff == nil { + return + } + if conn.iceBackoff.Snapshot().Failures > 0 { + conn.Log.Infof("ICE success, resetting backoff (was %d failures)", + conn.iceBackoff.Snapshot().Failures) + } + conn.iceBackoff.markSuccess() + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } +} + +// SetIceBackoffMax updates the per-peer backoff cap. Called by ConnMgr +// when the server pushes a new p2p_retry_max_seconds value. If the +// iceBackoff is not yet initialized (Conn not opened yet), the value +// is stored in config so Open() picks it up. Phase 3 of #5989. +func (conn *Conn) SetIceBackoffMax(d time.Duration) { + conn.mu.Lock() + defer conn.mu.Unlock() + conn.config.P2pRetryMaxSeconds = uint32(d / time.Second) + if conn.iceBackoff != nil { + conn.iceBackoff.SetMaxBackoff(d) + } +} + +// IceBackoffSnapshot exposes the read-only backoff state for the +// status output (Task E1). Returns zero-value snapshot if no backoff +// is active. Phase 3 of #5989. +func (conn *Conn) IceBackoffSnapshot() BackoffSnapshot { + conn.mu.Lock() + defer conn.mu.Unlock() + if conn.iceBackoff == nil { + return BackoffSnapshot{} + } + return conn.iceBackoff.Snapshot() +} + +// onNetworkChange is invoked by Guard when the signal/relay layer +// reconnects after a network change (LTE-modem replug, WiFi roaming, etc.). +// Phase 3.5 of #5989. +// +// Resets the per-peer ICE-failure backoff (because the NAT topology may +// have changed -- previous failures do not predict future ones) AND +// recreates the workerICE wrapper so the next AttachICE/offer has a +// fresh pion-agent rather than one closed by a previous DetachICE call. 
+// +// Called from Guard's goroutine; acquires conn.mu, so it must not be +// invoked from a path that already holds conn.mu. +func (conn *Conn) onNetworkChange() { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.ctx.Err() != nil { + return + } + + if conn.iceBackoff != nil { + snap := conn.iceBackoff.Snapshot() + if snap.Failures > 0 { + conn.Log.Infof("network change detected, resetting ICE backoff (was %d failures)", + snap.Failures) + } + conn.iceBackoff.Reset() + if conn.statusRecorder != nil { + conn.statusRecorder.UpdatePeerIceBackoff(conn.config.Key, conn.iceBackoff.Snapshot()) + } + } + + // We deliberately do NOT replace the workerICE wrapper here. Replacing + // it leaks underlying socket/iface bindings between the old and new + // instance, which empirically causes ICE to fail with a 13s pair-check + // timeout instead of converging in <1s like a fresh daemon-start does. + // + // We also deliberately do NOT call handshaker.SendOffer() here even + // though that was an earlier attempt. The Guard's reconnect-loop + // already issues sendOffer via its newReconnectTicker (800ms initial, + // up to ~4 retries in the first ~6s) right after the same srReconnect + // event that fires this callback. Adding our own SendOffer just creates + // a sending-offer storm: 5 offers per peer in 6 seconds, which on the + // remote side triggers repeated tear-down + reCreateAgent cycles in + // quick succession (each new sessionID forces it). That prevents ICE + // from ever completing its pair-checks. + // + // All we do here: close the current pion agent (sets w.agent = nil). + // The Guard's natural reconnect-loop then drives the next sendOffer, + // the remote responds with a fresh offer, and our existing OnNewOffer + // path (still attached to the unchanged workerICE wrapper) goes + // through the well-tested "agent==nil + new offer -> reCreateAgent" + // branch in worker_ice.go. 
+ // + // Phase 3.7g (#5989): only tear down the workerICE agent when ICE is + // actually broken. If pion's lastKnownState is still Connected the + // peer-to-peer UDP path is alive end-to-end (typical for a brief + // signal-server outage where WG keepalives between peers continued + // to flow); closing the agent here would force a 15-25 s ICE + // renegotiation cycle plus a Relay→ICE handover gap that the user + // would observe as a ping dropout for no good reason. + // + // If ICE actually went Disconnected/Failed during the network event, + // pion has already cleared w.agent via onConnectionStateChange and + // the Close call below is a no-op anyway. Either way, a fresh remote + // OFFER will recreate the agent through the existing OnNewOffer path. + // + // In ModeRelayForced workerICE is nil; nothing to close. + if conn.workerICE != nil && !conn.workerICE.IsConnected() { + conn.workerICE.Close() + } else if conn.workerICE != nil { + conn.Log.Debugf("network change: skipping workerICE.Close (ICE still Connected, soft-fallback)") + } + + // Phase 3.7e (#5989): force the ICE listener back on after a network + // change. Empirically, after an LTE-modem replug the iceListener can + // end up detached for some peers (paths via onICEFailed → DetachICE + // after a Failed transition that we did not log because of timing, + // or via concurrent state changes during the bounce). Re-attaching + // on every signal in ConnMgr.ActivatePeer (Phase 3.7d) is necessary + // but not sufficient: by the time the next signal arrives, several + // remote OFFERs and the Guard's first sendOffer may already have + // been silently dropped at handshaker.Listen() because no listener + // was present. Re-attaching here closes that window deterministically. + // + // We do NOT call SendOffer from this path. 
The Guard's natural + // reconnect-ticker (newReconnectTicker, 800 ms initial) issues the + // next offer right after the same srReconnect event that drove this + // callback; sending an extra one creates the offer-storm that + // Phase 3.7b removed. + conn.attachICEListenerLocked() + + conn.Log.Debugf("ICE state reset on network change (agent closed; listener re-armed; Guard will resend offer)") +} diff --git a/client/internal/peer/conn_test.go b/client/internal/peer/conn_test.go index 59216b647e9..58b8432bdd2 100644 --- a/client/internal/peer/conn_test.go +++ b/client/internal/peer/conn_test.go @@ -4,9 +4,11 @@ import ( "context" "fmt" "os" + "strings" "testing" "time" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/netbirdio/netbird/client/iface" @@ -281,6 +283,137 @@ func TestConn_presharedKey(t *testing.T) { } } +// TestConn_AttachICE_NilHandshaker verifies AttachICE errors when called +// before Open() has wired up the handshaker. +func TestConn_AttachICE_NilHandshaker(t *testing.T) { + c := &Conn{Log: log.WithField("peer", "test")} + if err := c.AttachICE(); err == nil { + t.Fatal("AttachICE on Conn with nil handshaker should return error") + } +} + +// TestConn_AttachICE_NilWorkerICE verifies AttachICE errors when the conn +// is in relay-forced mode (workerICE was never created). +func TestConn_AttachICE_NilWorkerICE(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + } + if err := c.AttachICE(); err == nil { + t.Fatal("AttachICE with nil workerICE should return error (relay-forced mode)") + } +} + +// TestConn_DetachICE_NoHandshaker is a no-op idempotency check: calling +// DetachICE before Open() must not panic and must not error. 
+func TestConn_DetachICE_NoHandshaker(t *testing.T) { + c := &Conn{Log: log.WithField("peer", "test")} + if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE with nil handshaker should be no-op, got error: %v", err) + } +} + +// TestConn_DetachICE_ClearsListener verifies DetachICE removes the ICE +// listener from the handshaker. workerICE is left nil so Close() is skipped. +func TestConn_DetachICE_ClearsListener(t *testing.T) { + h := &Handshaker{} + h.AddICEListener(func(o *OfferAnswer) {}) + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: h, + } + + if h.readICEListener() == nil { + t.Fatal("precondition: handshaker should have a listener") + } + + if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE returned error: %v", err) + } + + if h.readICEListener() != nil { + t.Fatal("DetachICE should clear the ICE listener") + } + + // Idempotent: second call is a no-op. + if err := c.DetachICE(); err != nil { + t.Fatalf("DetachICE second call should be no-op, got: %v", err) + } +} + +func TestConn_AttachICE_NoOpWhenSuspended(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() // suspend it + + // AttachICE should return nil but not actually attach + err := c.AttachICE() + if err != nil { + t.Fatalf("expected nil error during backoff, got %v", err) + } + if c.handshaker.readICEListener() != nil { + t.Fatal("AttachICE during backoff must NOT register a listener") + } +} + +func TestConn_AttachICE_AfterBackoffExpiry(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + handshaker: &Handshaker{}, + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() + // Force nextRetry into the past + c.iceBackoff.mu.Lock() + c.iceBackoff.nextRetry = time.Now().Add(-1 * time.Second) + c.iceBackoff.mu.Unlock() + + // Without workerICE, AttachICE returns the "nil workerICE" error + // -- but we 
only care that the backoff gate is NOT engaged anymore. + err := c.AttachICE() + if err == nil { + t.Fatal("expected the relay-forced error path (nil workerICE)") + } + // The error should be about workerICE, not "suspended": + if errMsg := err.Error(); !strings.Contains(errMsg, "workerICE") { + t.Fatalf("after backoff expiry, error should be about workerICE not suspend; got %q", errMsg) + } +} + +func TestConn_OnICEFailed_MarksBackoffFailure(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + iceBackoff: newIceBackoff(15 * time.Minute), + } + if c.iceBackoff.IsSuspended() { + t.Fatal("precondition: not suspended") + } + c.onICEFailed() + if !c.iceBackoff.IsSuspended() { + t.Fatal("after onICEFailed, must be suspended") + } + if c.iceBackoff.Snapshot().Failures != 1 { + t.Fatalf("failures must be 1, got %d", c.iceBackoff.Snapshot().Failures) + } +} + +func TestConn_OnICEConnected_ResetsBackoff(t *testing.T) { + c := &Conn{ + Log: log.WithField("peer", "test"), + iceBackoff: newIceBackoff(15 * time.Minute), + } + c.iceBackoff.markFailure() + c.iceBackoff.markFailure() + c.onICEConnected() + snap := c.iceBackoff.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after onICEConnected: %+v", snap) + } +} + func TestConn_presharedKey_RosenpassManaged(t *testing.T) { conn := Conn{ config: ConnConfig{ diff --git a/client/internal/peer/env.go b/client/internal/peer/env.go index ed6a3af5391..fbee8f6808b 100644 --- a/client/internal/peer/env.go +++ b/client/internal/peer/env.go @@ -3,14 +3,32 @@ package peer import ( "os" "runtime" + "strconv" "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/shared/connectionmode" ) const ( + EnvKeyNBConnectionMode = "NB_CONNECTION_MODE" EnvKeyNBForceRelay = "NB_FORCE_RELAY" EnvKeyNBHomeRelayServers = "NB_HOME_RELAY_SERVERS" + + envEnableLazyConn = "NB_ENABLE_EXPERIMENTAL_LAZY_CONN" + envInactivityThreshold = "NB_LAZY_CONN_INACTIVITY_THRESHOLD" ) +var 
deprecationOnce sync.Map // env-var name -> *sync.Once + +// IsForceRelayed reports whether legacy NB_FORCE_RELAY is set, plus the +// runtime-special-case js (always relayed because of browser limitations). +// +// Deprecated: prefer ResolveModeFromEnv. Kept for callers that haven't +// migrated yet (Phase 1 backwards compat). func IsForceRelayed() bool { if runtime.GOOS == "js" { return true @@ -18,6 +36,65 @@ func IsForceRelayed() bool { return strings.EqualFold(os.Getenv(EnvKeyNBForceRelay), "true") } +// ResolveModeFromEnv reads all three legacy env vars plus the new +// NB_CONNECTION_MODE, applies the documented precedence and returns +// the resolved Mode and relay-timeout (in seconds, 0 if unset). +// +// Precedence: +// 1. NB_CONNECTION_MODE if parseable -> wins +// 2. NB_FORCE_RELAY=true -> ModeRelayForced (most-restrictive) +// 3. NB_ENABLE_EXPERIMENTAL_LAZY_CONN=true -> ModeP2PLazy +// 4. otherwise -> ModeUnspecified (caller falls through) +// +// NB_LAZY_CONN_INACTIVITY_THRESHOLD is parsed independently as the +// relay-timeout (alias) and emits a deprecation-warning if used. 
+func ResolveModeFromEnv() (connectionmode.Mode, uint32) { + mode := connectionmode.ModeUnspecified + + if raw := os.Getenv(EnvKeyNBConnectionMode); raw != "" { + parsed, err := connectionmode.ParseString(raw) + if err != nil { + log.Warnf("ignoring %s=%q: %v", EnvKeyNBConnectionMode, raw, err) + } else if parsed != connectionmode.ModeUnspecified { + mode = parsed + } + } + + if mode == connectionmode.ModeUnspecified { + if strings.EqualFold(os.Getenv(EnvKeyNBForceRelay), "true") { + warnDeprecated(EnvKeyNBForceRelay, EnvKeyNBConnectionMode+"=relay-forced") + mode = connectionmode.ModeRelayForced + } else if isLazyEnvTrue() { + warnDeprecated(envEnableLazyConn, EnvKeyNBConnectionMode+"=p2p-lazy") + mode = connectionmode.ModeP2PLazy + } + } + + timeoutSecs := uint32(0) + if raw := os.Getenv(envInactivityThreshold); raw != "" { + if d, err := time.ParseDuration(raw); err == nil { + timeoutSecs = uint32(d.Seconds()) + warnDeprecated(envInactivityThreshold, "the relay_timeout setting on the management server") + } else { + log.Warnf("ignoring %s=%q: %v", envInactivityThreshold, raw, err) + } + } + + return mode, timeoutSecs +} + +func isLazyEnvTrue() bool { + v, err := strconv.ParseBool(os.Getenv(envEnableLazyConn)) + return err == nil && v +} + +func warnDeprecated(envName, replacement string) { + once, _ := deprecationOnce.LoadOrStore(envName, &sync.Once{}) + once.(*sync.Once).Do(func() { + log.Warnf("env var %s is deprecated; use %s instead. The legacy var still works in this release but may be removed in a future major version.", envName, replacement) + }) +} + // OverrideRelayURLs returns the relay server URL list set in // NB_HOME_RELAY_SERVERS (comma-separated) and a boolean indicating whether // the override is active. 
When the env var is unset, the boolean is false diff --git a/client/internal/peer/env_test.go b/client/internal/peer/env_test.go new file mode 100644 index 00000000000..b70939243c6 --- /dev/null +++ b/client/internal/peer/env_test.go @@ -0,0 +1,58 @@ +package peer + +import ( + "testing" + + "github.com/netbirdio/netbird/shared/connectionmode" +) + +func TestResolveModeFromEnv(t *testing.T) { + cases := []struct { + name string + envConnMode string + envForceRelay string + envEnableLazy string + envInactivity string + wantMode connectionmode.Mode + wantTimeoutSecs uint32 + }{ + {"all unset", "", "", "", "", connectionmode.ModeUnspecified, 0}, + {"connection_mode wins", "p2p-dynamic", "true", "true", "10s", connectionmode.ModeP2PDynamic, 10}, + {"force_relay alone", "", "true", "", "", connectionmode.ModeRelayForced, 0}, + {"lazy alone", "", "", "true", "", connectionmode.ModeP2PLazy, 0}, + {"force_relay AND lazy: force_relay wins", "", "true", "true", "", connectionmode.ModeRelayForced, 0}, + {"only inactivity threshold", "", "", "", "30m", connectionmode.ModeUnspecified, 1800}, + {"connection_mode unparsable falls through to legacy", "garbage", "true", "", "", connectionmode.ModeRelayForced, 0}, + {"connection_mode parses p2p-lazy", "p2p-lazy", "", "", "", connectionmode.ModeP2PLazy, 0}, + {"force-relay value is true (case-insensitive)", "", "TRUE", "", "", connectionmode.ModeRelayForced, 0}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + t.Setenv(EnvKeyNBConnectionMode, c.envConnMode) + t.Setenv(EnvKeyNBForceRelay, c.envForceRelay) + t.Setenv("NB_ENABLE_EXPERIMENTAL_LAZY_CONN", c.envEnableLazy) + t.Setenv("NB_LAZY_CONN_INACTIVITY_THRESHOLD", c.envInactivity) + + gotMode, gotTimeout := ResolveModeFromEnv() + if gotMode != c.wantMode { + t.Errorf("mode = %v, want %v", gotMode, c.wantMode) + } + if gotTimeout != c.wantTimeoutSecs { + t.Errorf("timeout = %v, want %v", gotTimeout, c.wantTimeoutSecs) + } + }) + } +} + +func 
TestIsForceRelayedBackwardsCompat(t *testing.T) { + // IsForceRelayed must remain functional for existing callers + // during the migration window (env.go still exposes it). + t.Setenv(EnvKeyNBForceRelay, "true") + if !IsForceRelayed() { + t.Error("IsForceRelayed() should return true when NB_FORCE_RELAY=true") + } + t.Setenv(EnvKeyNBForceRelay, "false") + if IsForceRelayed() { + t.Error("IsForceRelayed() should return false when NB_FORCE_RELAY=false") + } +} diff --git a/client/internal/peer/guard/guard.go b/client/internal/peer/guard/guard.go index 2e5efbcc5a3..0f7f70e899c 100644 --- a/client/internal/peer/guard/guard.go +++ b/client/internal/peer/guard/guard.go @@ -37,6 +37,10 @@ type Guard struct { srWatcher *SRWatcher relayedConnDisconnected chan struct{} iCEConnDisconnected chan struct{} + // onNetworkChange is called when signal/relay reconnects after a + // network change (e.g. LTE-modem replug, WiFi roaming). Set once + // before Start() is called; no lock needed. Phase 3.5 of #5989. + onNetworkChange func() } func NewGuard(log *log.Entry, isConnectedFn connStatusFunc, timeout time.Duration, srWatcher *SRWatcher) *Guard { @@ -50,6 +54,13 @@ func NewGuard(log *log.Entry, isConnectedFn connStatusFunc, timeout time.Duratio } } +// SetOnNetworkChange registers a callback that fires whenever the +// signal/relay layer reconnects after a network change. Must be called +// before Start(). Phase 3.5 of #5989. 
+func (g *Guard) SetOnNetworkChange(cb func()) { + g.onNetworkChange = cb +} + func (g *Guard) Start(ctx context.Context, eventCallback func()) { g.log.Infof("starting guard for reconnection with MaxInterval: %s", g.timeout) g.reconnectLoopWithRetry(ctx, eventCallback) @@ -130,6 +141,10 @@ func (g *Guard) reconnectLoopWithRetry(ctx context.Context, callback func()) { ticker = g.newReconnectTicker(ctx) tickerChannel = ticker.C iceState.reset() + // Phase 3.5 (#5989): notify Conn to reset iceBackoff + recreate workerICE + if g.onNetworkChange != nil { + g.onNetworkChange() + } case <-ctx.Done(): g.log.Debugf("context is done, stop reconnect loop") diff --git a/client/internal/peer/handshaker.go b/client/internal/peer/handshaker.go index 1d44096b640..b4c787e9fce 100644 --- a/client/internal/peer/handshaker.go +++ b/client/internal/peer/handshaker.go @@ -104,9 +104,30 @@ func (h *Handshaker) AddRelayListener(offer func(remoteOfferAnswer *OfferAnswer) } func (h *Handshaker) AddICEListener(offer func(remoteOfferAnswer *OfferAnswer)) { + h.mu.Lock() + defer h.mu.Unlock() h.iceListener = offer } +// RemoveICEListener clears the ICE-offer listener so subsequent remote +// offers no longer dispatch to workerICE. Idempotent; calling it when +// no listener was set is a no-op. Used by Conn.DetachICE in p2p-dynamic +// mode to deactivate ICE without tearing down the relay path. +func (h *Handshaker) RemoveICEListener() { + h.mu.Lock() + defer h.mu.Unlock() + h.iceListener = nil +} + +// readICEListener returns the current ICE listener under mutex protection. +// Used by Listen() so a concurrent RemoveICEListener cannot race with the +// dispatch loop. 
+func (h *Handshaker) readICEListener() func(*OfferAnswer) { + h.mu.Lock() + defer h.mu.Unlock() + return h.iceListener +} + func (h *Handshaker) Listen(ctx context.Context) { for { select { @@ -124,8 +145,11 @@ func (h *Handshaker) Listen(ctx context.Context) { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil && h.RemoteICESupported() { - h.iceListener(&remoteOfferAnswer) + if iceListener := h.readICEListener(); iceListener != nil && h.RemoteICESupported() { + iceListener(&remoteOfferAnswer) + } else if h.RemoteICESupported() { + h.log.Debugf("remote OFFER (session %s) without local ICE listener (relay-forced mode or peer in ICE backoff)", + remoteOfferAnswer.SessionIDString()) } if err := h.sendAnswer(); err != nil { @@ -146,8 +170,11 @@ func (h *Handshaker) Listen(ctx context.Context) { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil && h.RemoteICESupported() { - h.iceListener(&remoteOfferAnswer) + if iceListener := h.readICEListener(); iceListener != nil && h.RemoteICESupported() { + iceListener(&remoteOfferAnswer) + } else if h.RemoteICESupported() { + h.log.Debugf("remote ANSWER (session %s) without local ICE listener (relay-forced mode or peer in ICE backoff)", + remoteOfferAnswer.SessionIDString()) } case <-ctx.Done(): h.log.Infof("stop listening for remote offers and answers") diff --git a/client/internal/peer/handshaker_test.go b/client/internal/peer/handshaker_test.go new file mode 100644 index 00000000000..fdc95411eb8 --- /dev/null +++ b/client/internal/peer/handshaker_test.go @@ -0,0 +1,50 @@ +package peer + +import ( + "testing" +) + +func TestHandshaker_AddRemoveICEListener(t *testing.T) { + h := &Handshaker{} + listener := func(o *OfferAnswer) {} + + h.AddICEListener(listener) + if h.iceListener == nil { + t.Fatal("iceListener should be set after AddICEListener") + } + + h.RemoveICEListener() + if h.iceListener != nil { + t.Fatal("iceListener should be nil after RemoveICEListener") + } + + // Idempotency: 
removing again is a no-op. + h.RemoveICEListener() + if h.iceListener != nil { + t.Fatal("RemoveICEListener should be idempotent") + } + + // Re-add works. + h.AddICEListener(listener) + if h.iceListener == nil { + t.Fatal("re-adding the listener should work") + } +} + +func TestHandshaker_readICEListener(t *testing.T) { + h := &Handshaker{} + if got := h.readICEListener(); got != nil { + t.Fatal("readICEListener on empty Handshaker should return nil") + } + + listener := func(o *OfferAnswer) {} + h.AddICEListener(listener) + if got := h.readICEListener(); got == nil { + t.Fatal("readICEListener after AddICEListener should return non-nil") + } + + h.RemoveICEListener() + if got := h.readICEListener(); got != nil { + t.Fatal("readICEListener after RemoveICEListener should return nil") + } +} diff --git a/client/internal/peer/ice_backoff.go b/client/internal/peer/ice_backoff.go new file mode 100644 index 00000000000..4a600182ef4 --- /dev/null +++ b/client/internal/peer/ice_backoff.go @@ -0,0 +1,169 @@ +package peer + +import ( + "sync" + "time" + + "github.com/cenkalti/backoff/v4" +) + +const ( + // DefaultP2PRetryMax is the built-in fallback when the management + // server has not pushed a p2p_retry_max_seconds value (Proto wire + // value 0 = "not set"). Phase 3 of #5989. + DefaultP2PRetryMax = 15 * time.Minute + + iceBackoffInitialInterval = 1 * time.Minute + iceBackoffMultiplier = 2.0 + iceBackoffRandomizationFactor = 0.1 + + // networkChangeGracePeriod is the window after Reset() (signal/relay + // reconnect, network-change event) during which markFailure caps the + // suspend delay at networkChangeRetryDelay. Phase 3.7f of #5989. + // + // Rationale: the first ICE pair-check after a network change often + // fails on stale NAT mappings, even when subsequent attempts succeed. + // Falling back to the normal 1-minute initial backoff after that + // single failure leaves the peer on relay for far longer than the + // underlying connectivity actually warrants. 
A short fixed delay + // inside the grace window lets follow-up attempts run while the new + // LTE/Wi-Fi mapping is still fresh; outside the window the normal + // exponential schedule applies as before. + // + // Phase 3.7h widened the window from 30 s to 60 s and reduced the + // retry delay from 5 s to 2 s after observing real-world LTE-bounce + // behaviour: cold NAT mappings often need 3-4 ICE attempts to prime, + // and the previous 30 s window only fit ~2 attempts (each pair-check + // is ~12-15 s) before the schedule jumped to a 1-minute exponential + // suspend. The wider window plus shorter delay typically fits ~4-5 + // attempts and recovers within ~50 s for peers behind a single NAT + // instead of 2-3 minutes. + networkChangeGracePeriod = 60 * time.Second + networkChangeRetryDelay = 2 * time.Second +) + +// iceBackoffState tracks per-peer ICE-failure backoff in p2p-dynamic +// mode. Phase 3 of #5989. +type iceBackoffState struct { + mu sync.Mutex + bo *backoff.ExponentialBackOff + failures int + nextRetry time.Time + suspended bool + maxBackoff time.Duration + lastResetAt time.Time +} + +// BackoffSnapshot is a read-only view used by the status output. 
+type BackoffSnapshot struct { + Failures int + NextRetry time.Time + Suspended bool +} + +func newIceBackoff(maxBackoff time.Duration) *iceBackoffState { + return &iceBackoffState{ + bo: buildBackoff(maxBackoff), + maxBackoff: maxBackoff, + } +} + +func buildBackoff(maxBackoff time.Duration) *backoff.ExponentialBackOff { + bo := backoff.NewExponentialBackOff() + bo.InitialInterval = iceBackoffInitialInterval + bo.Multiplier = iceBackoffMultiplier + bo.RandomizationFactor = iceBackoffRandomizationFactor + bo.MaxInterval = maxBackoff + bo.MaxElapsedTime = 0 + bo.Reset() + return bo +} + +func (s *iceBackoffState) IsSuspended() bool { + s.mu.Lock() + defer s.mu.Unlock() + if !s.suspended { + return false + } + if time.Now().After(s.nextRetry) { + return false + } + return true +} + +// markFailure increments the failure counter and computes the next retry +// time. Returns the delay so callers can log it. If maxBackoff is 0 +// (= disabled), returns 0 and does not modify state. +// +// Phase 3.7f of #5989: while we are still inside networkChangeGracePeriod +// after the most recent Reset() (typically a srReconnect / network-change +// event), the suspend delay is capped at networkChangeRetryDelay and the +// long-term exponential schedule is NOT advanced. Once the grace window +// elapses, normal exponential backoff applies. This lets the second ICE +// pair-check run while a fresh LTE/Wi-Fi NAT mapping is still warm, +// without flooding signaling for chronically broken peers. 
+func (s *iceBackoffState) markFailure() time.Duration { + s.mu.Lock() + defer s.mu.Unlock() + if s.maxBackoff == 0 { + return 0 + } + s.failures++ + + var delay time.Duration + if !s.lastResetAt.IsZero() && time.Since(s.lastResetAt) < networkChangeGracePeriod { + delay = networkChangeRetryDelay + } else { + delay = s.bo.NextBackOff() + } + + s.nextRetry = time.Now().Add(delay) + s.suspended = true + return delay +} + +func (s *iceBackoffState) Snapshot() BackoffSnapshot { + s.mu.Lock() + defer s.mu.Unlock() + return BackoffSnapshot{ + Failures: s.failures, + NextRetry: s.nextRetry, + Suspended: s.suspended && time.Now().Before(s.nextRetry), + } +} + +// markSuccess clears the failure counter and resets the internal backoff +// to its initial interval. Called when pion reports ConnectionStateConnected. +func (s *iceBackoffState) markSuccess() { + s.mu.Lock() + defer s.mu.Unlock() + s.failures = 0 + s.suspended = false + s.bo.Reset() +} + +// Reset is the hard reset triggered by interface-change or mode-push. +// In addition to clearing the failure counter and exponential schedule, +// it stamps lastResetAt so that markFailure can apply the +// post-network-change grace period (Phase 3.7f). +func (s *iceBackoffState) Reset() { + s.mu.Lock() + defer s.mu.Unlock() + s.failures = 0 + s.suspended = false + s.bo.Reset() + s.lastResetAt = time.Now() +} + +// SetMaxBackoff updates the cap. Called from ConnMgr.UpdatedRemotePeerConfig +// when the server pushes a new value. Rebuilds the internal backoff with +// the new schedule but preserves the failure counter. 
+func (s *iceBackoffState) SetMaxBackoff(d time.Duration) { + s.mu.Lock() + defer s.mu.Unlock() + if d == s.maxBackoff { + return + } + s.maxBackoff = d + s.bo = buildBackoff(d) +} diff --git a/client/internal/peer/ice_backoff_test.go b/client/internal/peer/ice_backoff_test.go new file mode 100644 index 00000000000..85fd3a5a2e0 --- /dev/null +++ b/client/internal/peer/ice_backoff_test.go @@ -0,0 +1,182 @@ +package peer + +import ( + "testing" + "time" +) + +func TestIceBackoff_InitialState(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + if s.IsSuspended() { + t.Fatal("fresh state must not be suspended") + } + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("fresh state snapshot wrong: %+v", snap) + } +} + +func TestIceBackoff_SetMaxBackoff_Live(t *testing.T) { + s := newIceBackoff(1 * time.Minute) // tight cap + s.markFailure() // expect ~1m + s.markFailure() // expect ~1m (capped) + d2 := s.markFailure() // still ~1m + if d2 > 90*time.Second { + t.Errorf("with 1m cap, third failure should be ~1m, got %v", d2) + } + // Live update to 1h cap + s.SetMaxBackoff(60 * time.Minute) + // Subsequent failure produces a non-zero delay (jitter-dependent + // but should be > 0 since backoff was rebuilt). 
+ d3 := s.markFailure() + if d3 <= 0 { + t.Errorf("after SetMaxBackoff: must produce non-zero delay, got %v", d3) + } +} + +func TestIceBackoff_SuccessReset(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + for i := 0; i < 5; i++ { + s.markFailure() + } + s.markSuccess() + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after markSuccess: %+v", snap) + } + // Next failure must be back to step-1 magnitude (~1m) + delay := s.markFailure() + if delay > 70*time.Second { + t.Errorf("after success-reset, first failure must restart at ~1m, got %v", delay) + } +} + +func TestIceBackoff_HardReset(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.markFailure() + s.markFailure() + s.Reset() + snap := s.Snapshot() + if snap.Failures != 0 || snap.Suspended { + t.Fatalf("after Reset: %+v", snap) + } +} + +func TestIceBackoff_SuspendedExpires(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.markFailure() + // Force nextRetry to past + s.mu.Lock() + s.nextRetry = time.Now().Add(-1 * time.Second) + s.mu.Unlock() + if s.IsSuspended() { + t.Fatal("expired suspend must report not suspended") + } +} + +func TestIceBackoff_ExponentialDoubling(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + expectedRanges := []struct { + min, max time.Duration + }{ + {50 * time.Second, 70 * time.Second}, // ~1m + {100 * time.Second, 140 * time.Second}, // ~2m + {210 * time.Second, 270 * time.Second}, // ~4m + {420 * time.Second, 540 * time.Second}, // ~8m + {810 * time.Second, 990 * time.Second}, // ~15m capped + {810 * time.Second, 990 * time.Second}, // ~15m capped + {810 * time.Second, 990 * time.Second}, // ~15m capped + } + for i, exp := range expectedRanges { + delay := s.markFailure() + if delay < exp.min || delay > exp.max { + t.Errorf("failure #%d: delay %v outside expected range [%v, %v]", + i+1, delay, exp.min, exp.max) + } + } +} + +func TestIceBackoff_MaxBackoffOverride(t *testing.T) { + s := newIceBackoff(5 * time.Minute) // 
300s cap + delays := []time.Duration{} + for i := 0; i < 5; i++ { + delays = append(delays, s.markFailure()) + } + // Last few should be capped at ~5m (300s) regardless of multiplier + for i := 2; i < 5; i++ { + if delays[i] > 6*time.Minute { + t.Errorf("failure #%d: delay %v exceeds 5m cap", i+1, delays[i]) + } + } +} + +func TestIceBackoff_MaxBackoffZero_Disabled(t *testing.T) { + s := newIceBackoff(0) + delay := s.markFailure() + if delay != 0 { + t.Errorf("disabled backoff must return 0 delay, got %v", delay) + } + if s.IsSuspended() { + t.Fatal("disabled backoff must not suspend") + } +} + +func TestIceBackoff_GracePeriodAfterReset_ShortDelay(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.Reset() // simulate srReconnect / network-change + + delay := s.markFailure() + if delay != networkChangeRetryDelay { + t.Fatalf("within grace window: expected %v, got %v", networkChangeRetryDelay, delay) + } + + // A second failure inside the grace window also uses the short delay + // (long-term exponential schedule is NOT advanced). + delay2 := s.markFailure() + if delay2 != networkChangeRetryDelay { + t.Fatalf("second failure inside grace: expected %v, got %v", networkChangeRetryDelay, delay2) + } +} + +func TestIceBackoff_GraceExpired_NormalExponential(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + s.Reset() + + // Force lastResetAt into the past so the grace window has expired. + s.mu.Lock() + s.lastResetAt = time.Now().Add(-2 * networkChangeGracePeriod) + s.mu.Unlock() + + delay := s.markFailure() + if delay < 50*time.Second || delay > 70*time.Second { + t.Fatalf("outside grace: expected ~1m exponential delay, got %v", delay) + } +} + +func TestIceBackoff_NoGraceWithoutReset(t *testing.T) { + // Fresh state without an explicit Reset must use the normal exponential + // schedule (lastResetAt is zero so the grace path does not apply). 
+ s := newIceBackoff(15 * time.Minute) + delay := s.markFailure() + if delay < 50*time.Second { + t.Fatalf("fresh state without Reset: expected ~1m delay, got %v", delay) + } +} + +func TestIceBackoff_FirstFailure(t *testing.T) { + s := newIceBackoff(15 * time.Minute) + delay := s.markFailure() + if delay <= 0 { + t.Fatalf("first failure must produce a positive delay, got %v", delay) + } + if delay < 50*time.Second || delay > 70*time.Second { + t.Fatalf("first failure delay should be ~1m (with 10%% jitter), got %v", delay) + } + if !s.IsSuspended() { + t.Fatal("after first failure must be suspended") + } + snap := s.Snapshot() + if snap.Failures != 1 || !snap.Suspended { + t.Fatalf("snapshot wrong: %+v", snap) + } +} diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index e8e61f660c9..bb683cc9e9e 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -70,6 +70,35 @@ type State struct { RosenpassEnabled bool SSHHostKey []byte routes map[string]struct{} + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int + IceBackoffNextRetry time.Time + IceBackoffSuspended bool + // Phase 3.7i (#5989): true = peer is in d.peers; false = in d.offlinePeers. + ServerOnline bool + RemoteEffectiveConnectionMode string + RemoteConfiguredConnectionMode string + RemoteEffectiveRelayTimeoutSecs uint32 + RemoteEffectiveP2PTimeoutSecs uint32 + RemoteEffectiveP2PRetryMaxSecs uint32 + RemoteConfiguredRelayTimeoutSecs uint32 + RemoteConfiguredP2PTimeoutSecs uint32 + RemoteConfiguredP2PRetryMaxSecs uint32 + RemoteGroups []string + RemoteLastSeenAtServer time.Time + // Phase 3.7i (#5989): live mgmt-server-tracked liveness flag from + // RemotePeerConfig.LiveOnline (= peer.Status.Connected on the server). + // True = peer is currently heartbeating to mgmt; false = configured + // but currently unreachable (hardware/network down). 
Used by the + // counter widget to distinguish "online" from "offline" in the + // user-intuitive sense, independent of the login-expiration split. + RemoteLiveOnline bool + // RemoteServerLivenessKnown is the explicit "I authoritatively know + // this peer's liveness" marker from a phase-3.7i+ management server. + // Old servers leave this false and the counter falls back to its + // LastSeenAtServer-zero heuristic; new servers set it true so the + // counter trusts RemoteLiveOnline directly. + RemoteServerLivenessKnown bool } // AddRoute add a single route to routes map @@ -160,6 +189,13 @@ type FullStatus struct { NumOfForwardingRules int LazyConnectionEnabled bool Events []*proto.SystemEvent + // Phase 3.7i (#5989): aggregate counters. + ConfiguredPeersTotal uint32 + ServerOnlinePeers uint32 + P2PConnectedPeers uint32 + RelayedConnectedPeers uint32 + IdleOnlinePeers uint32 + ServerOfflinePeers uint32 } type StatusChangeSubscription struct { @@ -219,6 +255,11 @@ type Status struct { routeIDLookup routeIDLookup wgIface WGIfaceStatus + + // Phase 3.7i (#5989): per-peer state-change subscription. Set by + // Engine; nil-checked everywhere. Fired AFTER releasing d.mux to + // avoid holding the lock through user code. + connStateListener func(pubkey string, st State) } // NewRecorder returns a new Status instance @@ -235,6 +276,59 @@ func NewRecorder(mgmAddress string) *Status { } } +// SetConnStateListener registers a callback that is called after each +// meaningful per-peer connection-state transition. The callback is +// invoked AFTER d.mux is released (Extract-Method pattern). Safe to +// call concurrently; may be set to nil to unregister. +// Phase 3.7i of #5989. 
+func (d *Status) SetConnStateListener(fn func(pubkey string, st State)) { + d.mux.Lock() + d.connStateListener = fn + d.mux.Unlock() +} + +// notifyConnStateChange returns a closure the caller invokes AFTER +// unlocking d.mux to deliver the state to the listener without holding +// the lock through user code. Caller must hold d.mux when calling this. +// Returns a no-op closure when no listener is registered. +func (d *Status) notifyConnStateChange(peerPubKey string, peerState State) func() { + listener := d.connStateListener + if listener == nil { + return func() {} + } + stateCopy := peerState + return func() { listener(peerPubKey, stateCopy) } +} + +// notifyPeerListChanged fires a peer-list-changed notification using the +// current peer count. Phase 3.7i: thin wrapper around the notifier so +// callers in UpdatePeerRemoteMeta and similar paths don't need to know +// about d.numOfPeers() and d.notifier internals. +// +// Caller must hold d.mux (this method reads d.peers/d.offlinePeers via +// numOfPeers and assumes consistent state). +// +//nolint:unused // wired up in a follow-up commit (UpdatePeerRemoteMeta path) +func (d *Status) notifyPeerListChanged() { + d.notifier.peerListChanged(d.numOfPeers()) +} + +// notifyPeerStateChangeListeners snapshots the per-peer router-state for +// peerID under the lock and dispatches it to registered subscribers in +// a goroutine, so the dispatch itself does not block on d.mux. Called +// when a peer's UI-relevant fields (LiveOnline, EffectiveConnectionMode, +// material ICE/Relay change) flip and subscribers need an immediate +// push instead of waiting for the next periodic poll. Phase 3.7i. +// +// Caller must hold d.mux when calling this. 
+func (d *Status) notifyPeerStateChangeListeners(peerID string) { + snapshot := d.snapshotRouterPeersLocked(peerID, true) + if snapshot == nil { + return + } + go d.dispatchRouterPeers(peerID, snapshot) +} + func (d *Status) SetRelayMgr(manager *relayClient.Manager) { d.mux.Lock() defer d.mux.Unlock() @@ -319,12 +413,21 @@ func (d *Status) RemovePeer(peerPubKey string) error { // UpdatePeerState updates peer status func (d *Status) UpdatePeerState(receivedState State) error { + notifyFn, err := d.updatePeerStateLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerStateLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } oldState := peerState.ConnStatus @@ -357,7 +460,101 @@ func (d *Status) UpdatePeerState(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + + if hasConnStatusChanged(oldState, receivedState.ConnStatus) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil +} + +// UpdatePeerIceBackoff updates the ICE-backoff snapshot for a peer. +// Called by Conn.onICEFailed / onICEConnected so that the daemon +// status reflects current backoff state. Phase 3 of #5989. +func (d *Status) UpdatePeerIceBackoff(pubKey string, snap BackoffSnapshot) { + d.mux.Lock() + defer d.mux.Unlock() + + peerState, ok := d.peers[pubKey] + if !ok { + return + } + peerState.IceBackoffFailures = snap.Failures + peerState.IceBackoffNextRetry = snap.NextRetry + peerState.IceBackoffSuspended = snap.Suspended + d.peers[pubKey] = peerState +} + +// RemoteMeta is the slice of per-peer fields RemotePeerConfig populates. +// Phase 3.7i of #5989. 
+type RemoteMeta struct { + EffectiveConnectionMode string + EffectiveRelayTimeoutSecs uint32 + EffectiveP2PTimeoutSecs uint32 + EffectiveP2PRetryMaxSecs uint32 + ConfiguredConnectionMode string + ConfiguredRelayTimeoutSecs uint32 + ConfiguredP2PTimeoutSecs uint32 + ConfiguredP2PRetryMaxSecs uint32 + Groups []string + LastSeenAtServer time.Time + LiveOnline bool + ServerLivenessKnown bool +} + +// UpdatePeerRemoteMeta sets the RemotePeerConfig-derived fields on the +// peer's State without touching ConnStatus or transport stats. Looks up +// the peer in both online (d.peers) and offline (d.offlinePeers) maps. +// Phase 3.7i of #5989. +func (d *Status) UpdatePeerRemoteMeta(pubKey string, meta RemoteMeta) error { + d.mux.Lock() + defer d.mux.Unlock() + st, online := d.peers[pubKey] + if online { + st.RemoteEffectiveConnectionMode = meta.EffectiveConnectionMode + st.RemoteConfiguredConnectionMode = meta.ConfiguredConnectionMode + st.RemoteEffectiveRelayTimeoutSecs = meta.EffectiveRelayTimeoutSecs + st.RemoteEffectiveP2PTimeoutSecs = meta.EffectiveP2PTimeoutSecs + st.RemoteEffectiveP2PRetryMaxSecs = meta.EffectiveP2PRetryMaxSecs + st.RemoteConfiguredRelayTimeoutSecs = meta.ConfiguredRelayTimeoutSecs + st.RemoteConfiguredP2PTimeoutSecs = meta.ConfiguredP2PTimeoutSecs + st.RemoteConfiguredP2PRetryMaxSecs = meta.ConfiguredP2PRetryMaxSecs + st.RemoteGroups = meta.Groups + st.RemoteLastSeenAtServer = meta.LastSeenAtServer + st.RemoteLiveOnline = meta.LiveOnline + st.RemoteServerLivenessKnown = meta.ServerLivenessKnown + d.peers[pubKey] = st + return nil + } + for i := range d.offlinePeers { + if d.offlinePeers[i].PubKey == pubKey { + d.offlinePeers[i].RemoteEffectiveConnectionMode = meta.EffectiveConnectionMode + d.offlinePeers[i].RemoteConfiguredConnectionMode = meta.ConfiguredConnectionMode + d.offlinePeers[i].RemoteEffectiveRelayTimeoutSecs = meta.EffectiveRelayTimeoutSecs + d.offlinePeers[i].RemoteEffectiveP2PTimeoutSecs = meta.EffectiveP2PTimeoutSecs + 
d.offlinePeers[i].RemoteEffectiveP2PRetryMaxSecs = meta.EffectiveP2PRetryMaxSecs + d.offlinePeers[i].RemoteConfiguredRelayTimeoutSecs = meta.ConfiguredRelayTimeoutSecs + d.offlinePeers[i].RemoteConfiguredP2PTimeoutSecs = meta.ConfiguredP2PTimeoutSecs + d.offlinePeers[i].RemoteConfiguredP2PRetryMaxSecs = meta.ConfiguredP2PRetryMaxSecs + d.offlinePeers[i].RemoteGroups = meta.Groups + d.offlinePeers[i].RemoteLastSeenAtServer = meta.LastSeenAtServer + d.offlinePeers[i].RemoteLiveOnline = meta.LiveOnline + d.offlinePeers[i].RemoteServerLivenessKnown = meta.ServerLivenessKnown + return nil + } + } + return fmt.Errorf("peer %s not found in either map", pubKey) +} + +// TimestampOrZero converts a *timestamppb.Timestamp to time.Time, +// returning zero-time when the proto pointer is nil. Used by engine.go +// (Task 3.3) when populating RemoteMeta from RemotePeerConfig where +// last_seen_at_server may be unset for peers that pre-date Phase 3.7i. +// Phase 3.7i of #5989. +func TimestampOrZero(t *timestamppb.Timestamp) time.Time { + if t == nil { + return time.Time{} + } + return t.AsTime() } func (d *Status) AddPeerStateRoute(peer string, route string, resourceId route.ResID) error { @@ -421,16 +618,25 @@ func (d *Status) CheckRoutes(ip netip.Addr) ([]byte, bool) { } func (d *Status) UpdatePeerICEState(receivedState State) error { + notifyFn, err := d.updatePeerICEStateLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerICEStateLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } - oldState := peerState.ConnStatus - oldIsRelayed := peerState.Relayed + oldSnapshot := peerState + oldStatus := peerState.ConnStatus peerState.ConnStatus = receivedState.ConnStatus peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate @@ -443,10 +649,11 @@ 
func (d *Status) UpdatePeerICEState(receivedState State) error { d.peers[receivedState.PubKey] = peerState - notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) - notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + notifyList := hasConnStatusChanged(oldStatus, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) numPeers := d.numOfPeers() + materialICE := hasMaterialICEChange(oldSnapshot, peerState) d.mux.Unlock() @@ -456,20 +663,36 @@ func (d *Status) UpdatePeerICEState(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + if materialICE { + d.notifyPeerStateChangeListeners(receivedState.PubKey) + } + + if hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil } func (d *Status) UpdatePeerRelayedState(receivedState State) error { + notifyFn, err := d.updatePeerRelayedStateLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerRelayedStateLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } - oldState := peerState.ConnStatus - oldIsRelayed := peerState.Relayed + oldSnapshot := peerState + oldStatus := peerState.ConnStatus peerState.ConnStatus = receivedState.ConnStatus peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate @@ -479,10 +702,11 @@ func (d *Status) UpdatePeerRelayedState(receivedState State) error { d.peers[receivedState.PubKey] = peerState - 
notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) - notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + notifyList := hasConnStatusChanged(oldStatus, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) numPeers := d.numOfPeers() + materialRelay := hasMaterialRelayChange(oldSnapshot, peerState) d.mux.Unlock() @@ -492,16 +716,32 @@ func (d *Status) UpdatePeerRelayedState(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + if materialRelay { + d.notifyPeerStateChangeListeners(receivedState.PubKey) + } + + if hasStatusOrRelayedChange(oldStatus, receivedState.ConnStatus, oldSnapshot.Relayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil } func (d *Status) UpdatePeerRelayedStateToDisconnected(receivedState State) error { + notifyFn, err := d.updatePeerRelayedStateToDisconnectedLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerRelayedStateToDisconnectedLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } oldState := peerState.ConnStatus @@ -527,16 +767,29 @@ func (d *Status) UpdatePeerRelayedStateToDisconnected(receivedState State) error if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + + if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, 
nil } func (d *Status) UpdatePeerICEStateToDisconnected(receivedState State) error { + notifyFn, err := d.updatePeerICEStateToDisconnectedLocked(receivedState) + if err != nil { + return err + } + notifyFn() + return nil +} + +func (d *Status) updatePeerICEStateToDisconnectedLocked(receivedState State) (func(), error) { d.mux.Lock() peerState, ok := d.peers[receivedState.PubKey] if !ok { d.mux.Unlock() - return errors.New("peer doesn't exist") + return func() {}, errors.New("peer doesn't exist") } oldState := peerState.ConnStatus @@ -565,7 +818,11 @@ func (d *Status) UpdatePeerICEStateToDisconnected(receivedState State) error { if notifyRouter { d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } - return nil + + if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { + return d.notifyConnStateChange(receivedState.PubKey, peerState), nil + } + return func() {}, nil } // UpdateWireGuardPeerState updates the WireGuard bits of the peer state @@ -595,6 +852,47 @@ func hasConnStatusChanged(oldStatus, newStatus ConnStatus) bool { return newStatus != oldStatus } +// hasMaterialICEChange returns true when any field that the management +// server's "endpoint flips immediate" UX promise depends on has moved. +// Beyond the status/relayed flip already covered by hasStatusOrRelayedChange, +// this catches: +// - Local/remote ICE candidate endpoint changes (NAT-traversal roaming) +// - Local/remote ICE candidate type changes (host -> srflx -> relay) +// +// Without this an in-place endpoint flip would only surface to the +// dashboard at the next 60 s heartbeat tick. 
+func hasMaterialICEChange(oldState, newState State) bool { + if hasStatusOrRelayedChange(oldState.ConnStatus, newState.ConnStatus, oldState.Relayed, newState.Relayed) { + return true + } + if oldState.LocalIceCandidateEndpoint != newState.LocalIceCandidateEndpoint { + return true + } + if oldState.RemoteIceCandidateEndpoint != newState.RemoteIceCandidateEndpoint { + return true + } + if oldState.LocalIceCandidateType != newState.LocalIceCandidateType { + return true + } + if oldState.RemoteIceCandidateType != newState.RemoteIceCandidateType { + return true + } + return false +} + +// hasMaterialRelayChange returns true when relayed-state material fields +// have changed. Beyond status/relayed, this catches relay-server flips +// (a peer being moved to a different relay endpoint). +func hasMaterialRelayChange(oldState, newState State) bool { + if hasStatusOrRelayedChange(oldState.ConnStatus, newState.ConnStatus, oldState.Relayed, newState.Relayed) { + return true + } + if oldState.RelayServerAddress != newState.RelayServerAddress { + return true + } + return false +} + // UpdatePeerFQDN update peer's state fqdn only func (d *Status) UpdatePeerFQDN(peerPubKey, fqdn string) error { d.mux.Lock() @@ -1042,11 +1340,63 @@ func (d *Status) GetFullStatus() FullStatus { fullStatus.LocalPeerState = d.localPeer + var p2p, relayed, idle, offline uint32 + + // Phase 3.7i (#5989) counter semantics: + // ServerOnline := peer.Status.Connected on the management server + // (RemotePeerConfig.live_online → State.RemoteLiveOnline) + // Offline := configured but NOT live (heartbeat is stale OR + // login expired). For login-expired peers, the + // daemon already places them in d.offlinePeers via + // updateOfflinePeers; the rest live in d.peers + // regardless of their live status, so we additionally + // check RemoteLiveOnline. 
+ // + // Backward-compat fallback: if the management server pre-dates + // Phase 3.7i, RemoteServerLivenessKnown is false (zero value of the + // never-populated proto field). In that case we cannot trust + // LiveOnline so we fall back to the legacy heuristic: assume online + // unless LastSeenAtServer is set AND LiveOnline is explicitly false. + // Phase-3.7i+ servers set ServerLivenessKnown=true and we then trust + // LiveOnline directly — both for "yes online" and "no offline". for _, status := range d.peers { + var isLive bool + if status.RemoteServerLivenessKnown { + isLive = status.RemoteLiveOnline + } else { + mgmtKnowsLiveness := !status.RemoteLastSeenAtServer.IsZero() + isLive = status.RemoteLiveOnline || !mgmtKnowsLiveness + } + if isLive { + status.ServerOnline = true + switch { + case status.ConnStatus == StatusConnected && !status.Relayed: + p2p++ + case status.ConnStatus == StatusConnected && status.Relayed: + relayed++ + default: + idle++ + } + } else { + status.ServerOnline = false + offline++ + } + fullStatus.Peers = append(fullStatus.Peers, status) + } + for _, status := range d.offlinePeers { + // Login-expired peers are always offline. + status.ServerOnline = false + offline++ fullStatus.Peers = append(fullStatus.Peers, status) } - fullStatus.Peers = append(fullStatus.Peers, d.offlinePeers...) + fullStatus.P2PConnectedPeers = p2p + fullStatus.RelayedConnectedPeers = relayed + fullStatus.IdleOnlinePeers = idle + fullStatus.ServerOfflinePeers = offline + fullStatus.ServerOnlinePeers = p2p + relayed + idle + fullStatus.ConfiguredPeersTotal = fullStatus.ServerOnlinePeers + offline + fullStatus.Events = d.GetEventHistory() return fullStatus } @@ -1324,6 +1674,14 @@ func (fs FullStatus) ToProto() *proto.FullStatus { pbFullStatus.NumberOfForwardingRules = int32(fs.NumOfForwardingRules) pbFullStatus.LazyConnectionEnabled = fs.LazyConnectionEnabled + // Phase 3.7i (#5989): aggregate counters. 
+ pbFullStatus.ConfiguredPeersTotal = fs.ConfiguredPeersTotal + pbFullStatus.ServerOnlinePeers = fs.ServerOnlinePeers + pbFullStatus.P2PConnectedPeers = fs.P2PConnectedPeers + pbFullStatus.RelayedConnectedPeers = fs.RelayedConnectedPeers + pbFullStatus.IdleOnlinePeers = fs.IdleOnlinePeers + pbFullStatus.ServerOfflinePeers = fs.ServerOfflinePeers + pbFullStatus.LocalPeerState.Networks = maps.Keys(fs.LocalPeerState.Routes) for _, peerState := range fs.Peers { @@ -1348,6 +1706,23 @@ func (fs FullStatus) ToProto() *proto.FullStatus { Networks: networks, Latency: durationpb.New(peerState.Latency), SshHostKey: peerState.SSHHostKey, + IceBackoffFailures: int32(peerState.IceBackoffFailures), + IceBackoffNextRetry: timestamppb.New(peerState.IceBackoffNextRetry), + IceBackoffSuspended: peerState.IceBackoffSuspended, + // Phase 3.7i (#5989): per-peer remote meta fields. + ServerOnline: peerState.ServerOnline, + Groups: peerState.RemoteGroups, + EffectiveConnectionMode: peerState.RemoteEffectiveConnectionMode, + ConfiguredConnectionMode: peerState.RemoteConfiguredConnectionMode, + EffectiveRelayTimeoutSecs: peerState.RemoteEffectiveRelayTimeoutSecs, + EffectiveP2PTimeoutSecs: peerState.RemoteEffectiveP2PTimeoutSecs, + EffectiveP2PRetryMaxSecs: peerState.RemoteEffectiveP2PRetryMaxSecs, + ConfiguredRelayTimeoutSecs: peerState.RemoteConfiguredRelayTimeoutSecs, + ConfiguredP2PTimeoutSecs: peerState.RemoteConfiguredP2PTimeoutSecs, + ConfiguredP2PRetryMaxSecs: peerState.RemoteConfiguredP2PRetryMaxSecs, + } + if !peerState.RemoteLastSeenAtServer.IsZero() { + pbPeerState.LastSeenAtServer = timestamppb.New(peerState.RemoteLastSeenAtServer) } pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) } diff --git a/client/internal/peer/status_test.go b/client/internal/peer/status_test.go index 272638750ff..415dec35050 100644 --- a/client/internal/peer/status_test.go +++ b/client/internal/peer/status_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "sync" + "sync/atomic" "testing" 
"time" @@ -243,7 +244,120 @@ func TestGetFullStatus(t *testing.T) { fullStatus := status.GetFullStatus() + // GetFullStatus sets ServerOnline=true for peers in d.peers. + peerState1.ServerOnline = true + peerState2.ServerOnline = true + assert.Equal(t, managementState, fullStatus.ManagementState, "management status should be equal") assert.Equal(t, signalState, fullStatus.SignalState, "signal status should be equal") assert.ElementsMatch(t, []State{peerState1, peerState2}, fullStatus.Peers, "peers states should match") } + +// TestStatus_ConnStateListener_CalledAfterUnlock verifies that the +// connStateListener registered via SetConnStateListener is invoked AFTER +// d.mux is released (Extract-Method guarantee). Phase 3.7i of #5989. +func TestStatus_ConnStateListener_CalledAfterUnlock(t *testing.T) { + d := NewRecorder("") + var listenerCalled atomic.Bool + var listenerObservedLockHeld atomic.Bool + + d.SetConnStateListener(func(_ string, _ State) { + // Try TryLock — if the locked body still holds mux this returns + // false. We record the result so the assertion below can report it. + if d.mux.TryLock() { + listenerObservedLockHeld.Store(false) + d.mux.Unlock() + } else { + listenerObservedLockHeld.Store(true) + } + listenerCalled.Store(true) + }) + + if err := d.AddPeer("peerA", "fqdn-A", "100.64.0.1"); err != nil { + t.Fatal(err) + } + // Trigger a ConnStatus transition (Idle -> Connected) which must fire + // the listener through updatePeerStateLocked. + if err := d.UpdatePeerState(State{ + PubKey: "peerA", + ConnStatus: StatusConnected, + ConnStatusUpdate: time.Now(), + }); err != nil { + t.Fatal(err) + } + + if !listenerCalled.Load() { + t.Error("listener not invoked") + } + if listenerObservedLockHeld.Load() { + t.Error("listener called while mux still held — Extract-Method refactor incomplete") + } +} + +// TestStatus_UpdatePeerRemoteMeta_PreservesConnStatus verifies that +// UpdatePeerRemoteMeta sets Remote* fields without touching ConnStatus. 
+// Phase 3.7i of #5989. +func TestStatus_UpdatePeerRemoteMeta_PreservesConnStatus(t *testing.T) { + d := NewRecorder("") + // Add a peer first so it exists in d.peers (the map). + if err := d.AddPeer("peerA", "fqdnA", "100.64.0.2"); err != nil { + t.Fatal(err) + } + // Set its ConnStatus to Connected so we can verify it is preserved. + if err := d.UpdatePeerState(State{ + PubKey: "peerA", + ConnStatus: StatusConnected, + Relayed: false, + }); err != nil { + t.Fatal(err) + } + + if err := d.UpdatePeerRemoteMeta("peerA", RemoteMeta{ + EffectiveConnectionMode: "p2p-dynamic", + Groups: []string{"router"}, + }); err != nil { + t.Fatal(err) + } + + d.mux.Lock() + got := d.peers["peerA"] + d.mux.Unlock() + if got.ConnStatus != StatusConnected { + t.Errorf("ConnStatus changed: %v", got.ConnStatus) + } + if got.RemoteEffectiveConnectionMode != "p2p-dynamic" { + t.Errorf("EffectiveMode not set: %s", got.RemoteEffectiveConnectionMode) + } + if len(got.RemoteGroups) != 1 || got.RemoteGroups[0] != "router" { + t.Errorf("Groups not set: %v", got.RemoteGroups) + } +} + +// TestStatus_GetFullStatus_SetsServerOnlineAndCounters verifies aggregate +// counters and ServerOnline flag set in GetFullStatus. Phase 3.7i of #5989. 
+func TestStatus_GetFullStatus_SetsServerOnlineAndCounters(t *testing.T) { + d := NewRecorder("") + d.mux.Lock() + d.peers["a"] = State{PubKey: "a", ConnStatus: StatusConnected, Relayed: false} + d.peers["b"] = State{PubKey: "b", ConnStatus: StatusConnected, Relayed: true} + d.peers["c"] = State{PubKey: "c", ConnStatus: StatusIdle} + d.offlinePeers = []State{{PubKey: "d"}} + d.mux.Unlock() + + fs := d.GetFullStatus() + if fs.P2PConnectedPeers != 1 || fs.RelayedConnectedPeers != 1 || + fs.IdleOnlinePeers != 1 || fs.ServerOfflinePeers != 1 || + fs.ConfiguredPeersTotal != 4 { + t.Errorf("counters wrong: P2P=%d Relayed=%d Idle=%d Offline=%d Total=%d", + fs.P2PConnectedPeers, fs.RelayedConnectedPeers, + fs.IdleOnlinePeers, fs.ServerOfflinePeers, fs.ConfiguredPeersTotal) + } + for _, st := range fs.Peers { + if st.PubKey == "d" && st.ServerOnline { + t.Error("offline peer must have ServerOnline=false") + } + if st.PubKey != "d" && !st.ServerOnline { + t.Errorf("online peer %s must have ServerOnline=true", st.PubKey) + } + } +} diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index 29bf5aaaa74..f4c881c87cc 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -101,6 +101,20 @@ func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) { defer w.muxAgent.Unlock() if w.agent != nil || w.agentConnecting { + // Phase 3.7c (#5989) re-introduces the Guard-Loop Fix from PR #5805. + // While the local ICE agent is mid-connection, ignore any incoming + // offer regardless of sessionID. Both sides' Guards fire fresh + // offers every ~800ms-30s (driven by their own iceRetryState + + // srReconnect events). If we tear down on every sessionID-change, + // the in-flight ICE pair-checks (~5-10s) never complete -- the + // remote's freshly-recreated agent generates yet another sessionID, + // loops back, infinite recreate cycle. 
Empirically observed on + // badmitterndorf during LTE-carrier instability: 5 different + // sessionIDs received from the remote in 2min, no P2P convergence. + if w.agentConnecting { + w.log.Debugf("agent connecting, skipping new offer (sessionID %s) to let pair-checks finish", remoteOfferAnswer.SessionIDString()) + return + } // backward compatibility with old clients that do not send session ID if remoteOfferAnswer.SessionID == nil { w.log.Debugf("agent already exists, skipping the offer") @@ -201,6 +215,21 @@ func (w *WorkerICE) InProgress() bool { return w.agentConnecting } +// IsConnected returns true when pion's ICE agent reports Connected and +// has not yet transitioned to Disconnected/Failed/Closed. Used by +// Conn.onNetworkChange (Phase 3.7g of #5989) to skip a needless +// workerICE.Close when an srReconnect/network-change event arrives but +// the existing P2P session is still alive end-to-end (typical for a +// brief signal-server outage while peer-to-peer UDP keeps flowing). +// Closing the agent in that case forces a 15-25 s renegotiation cycle +// and a Relay→ICE handover gap that the user would observe as a ping +// dropout, even though no real peer-to-peer connectivity loss occurred. +func (w *WorkerICE) IsConnected() bool { + w.muxAgent.Lock() + defer w.muxAgent.Unlock() + return w.agent != nil && w.lastKnownState == ice.ConnectionStateConnected +} + func (w *WorkerICE) Close() { w.muxAgent.Lock() defer w.muxAgent.Unlock() @@ -520,6 +549,8 @@ func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dia case ice.ConnectionStateConnected: w.lastKnownState = ice.ConnectionStateConnected w.logSuccessfulPaths(agent) + // Phase 3 of #5989: reset backoff on ICE success. + w.conn.onICEConnected() return case ice.ConnectionStateFailed, ice.ConnectionStateDisconnected, ice.ConnectionStateClosed: // ice.ConnectionStateClosed happens when we recreate the agent. 
For the P2P to TURN switch important to @@ -531,6 +562,13 @@ func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dia w.lastKnownState = ice.ConnectionStateDisconnected w.conn.onICEStateDisconnected(sessionChanged) } + + // Phase 3 of #5989: record failure in backoff only for true + // ICE failure (not for the synthetic Closed event that occurs + // when we recreate the agent on reconnect). + if state == ice.ConnectionStateFailed { + w.conn.onICEFailed() + } default: return } diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index 20c615d579d..2364392c702 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -96,6 +96,11 @@ type ConfigInput struct { LazyConnectionEnabled *bool + ConnectionMode *string + RelayTimeoutSeconds *uint32 + P2pTimeoutSeconds *uint32 + P2pRetryMaxSeconds *uint32 + MTU *uint16 } @@ -170,6 +175,13 @@ type Config struct { LazyConnectionEnabled bool + ConnectionMode string `json:",omitempty"` + RelayTimeoutSeconds uint32 `json:",omitempty"` + P2pTimeoutSeconds uint32 `json:",omitempty"` + // P2pRetryMaxSeconds caps the ICE-failure backoff schedule. 0 = use + // management-server value. Phase 3 of #5989. 
+ P2pRetryMaxSeconds uint32 `json:"p2p_retry_max_seconds,omitempty"` + MTU uint16 } @@ -593,6 +605,27 @@ func (config *Config) apply(input ConfigInput) (updated bool, err error) { updated = true } + if input.ConnectionMode != nil && *input.ConnectionMode != config.ConnectionMode { + log.Infof("switching connection mode to %s", *input.ConnectionMode) + config.ConnectionMode = *input.ConnectionMode + updated = true + } + if input.RelayTimeoutSeconds != nil && *input.RelayTimeoutSeconds != config.RelayTimeoutSeconds { + log.Infof("switching relay timeout to %d seconds", *input.RelayTimeoutSeconds) + config.RelayTimeoutSeconds = *input.RelayTimeoutSeconds + updated = true + } + if input.P2pTimeoutSeconds != nil && *input.P2pTimeoutSeconds != config.P2pTimeoutSeconds { + log.Infof("switching p2p timeout to %d seconds", *input.P2pTimeoutSeconds) + config.P2pTimeoutSeconds = *input.P2pTimeoutSeconds + updated = true + } + if input.P2pRetryMaxSeconds != nil && *input.P2pRetryMaxSeconds != config.P2pRetryMaxSeconds { + log.Infof("switching p2p retry max to %d seconds", *input.P2pRetryMaxSeconds) + config.P2pRetryMaxSeconds = *input.P2pRetryMaxSeconds + updated = true + } + if input.MTU != nil && *input.MTU != config.MTU { log.Infof("updating MTU to %d (old value %d)", *input.MTU, config.MTU) config.MTU = *input.MTU diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 11e7877f2df..8f8a351ecb4 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -342,8 +342,18 @@ type LoginRequest struct { EnableSSHRemotePortForwarding *bool `protobuf:"varint,37,opt,name=enableSSHRemotePortForwarding,proto3,oneof" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth *bool `protobuf:"varint,38,opt,name=disableSSHAuth,proto3,oneof" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL *int32 `protobuf:"varint,39,opt,name=sshJWTCacheTTL,proto3,oneof" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache 
protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + ConnectionMode *string `protobuf:"bytes,40,opt,name=connection_mode,json=connectionMode,proto3,oneof" json:"connection_mode,omitempty"` + P2PTimeoutSeconds *uint32 `protobuf:"varint,41,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3,oneof" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds *uint32 `protobuf:"varint,42,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3,oneof" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + P2PRetryMaxSeconds *uint32 `protobuf:"varint,43,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3,oneof" json:"p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *LoginRequest) Reset() { @@ -650,6 +660,34 @@ func (x *LoginRequest) GetSshJWTCacheTTL() int32 { return 0 } +func (x *LoginRequest) GetConnectionMode() string { + if x != nil && x.ConnectionMode != nil { + return *x.ConnectionMode + } + return "" +} + +func (x *LoginRequest) GetP2PTimeoutSeconds() uint32 { + if x != nil && x.P2PTimeoutSeconds != nil { + return *x.P2PTimeoutSeconds + } + return 0 +} + +func (x *LoginRequest) GetRelayTimeoutSeconds() uint32 { + if x != nil && x.RelayTimeoutSeconds != nil { + return *x.RelayTimeoutSeconds + } + return 0 +} + +func (x *LoginRequest) GetP2PRetryMaxSeconds() uint32 { + if x != nil && x.P2PRetryMaxSeconds != nil { + return *x.P2PRetryMaxSeconds + } + return 0 +} + type LoginResponse struct { state protoimpl.MessageState `protogen:"open.v1"` NeedsSSOLogin bool `protobuf:"varint,1,opt,name=needsSSOLogin,proto3" 
json:"needsSSOLogin,omitempty"` @@ -1182,8 +1220,28 @@ type GetConfigResponse struct { EnableSSHRemotePortForwarding bool `protobuf:"varint,23,opt,name=enableSSHRemotePortForwarding,proto3" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth bool `protobuf:"varint,25,opt,name=disableSSHAuth,proto3" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL int32 `protobuf:"varint,26,opt,name=sshJWTCacheTTL,proto3" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is the canonical lower-kebab-case name + // (relay-forced, p2p, p2p-lazy, p2p-dynamic, follow-server) or + // empty string when no local override is set. + ConnectionMode string `protobuf:"bytes,27,opt,name=connection_mode,json=connectionMode,proto3" json:"connection_mode,omitempty"` + P2PTimeoutSeconds uint32 `protobuf:"varint,28,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds uint32 `protobuf:"varint,29,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = no + // local override (daemon falls back to server-pushed value or built-in + // 15-min default). + P2PRetryMaxSeconds uint32 `protobuf:"varint,30,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3" json:"p2p_retry_max_seconds,omitempty"` + // Phase 3.7h: values most recently pushed by the management server's + // PeerConfig (independent of any local override). The UI surfaces + // these so users can see what "Follow server" would inherit and which + // numeric defaults the empty-entry overrides fall back to. All four + // are 0/empty when the engine has not received PeerConfig yet. 
+ ServerPushedConnectionMode string `protobuf:"bytes,31,opt,name=server_pushed_connection_mode,json=serverPushedConnectionMode,proto3" json:"server_pushed_connection_mode,omitempty"` + ServerPushedRelayTimeoutSeconds uint32 `protobuf:"varint,32,opt,name=server_pushed_relay_timeout_seconds,json=serverPushedRelayTimeoutSeconds,proto3" json:"server_pushed_relay_timeout_seconds,omitempty"` + ServerPushedP2PTimeoutSeconds uint32 `protobuf:"varint,33,opt,name=server_pushed_p2p_timeout_seconds,json=serverPushedP2pTimeoutSeconds,proto3" json:"server_pushed_p2p_timeout_seconds,omitempty"` + ServerPushedP2PRetryMaxSeconds uint32 `protobuf:"varint,34,opt,name=server_pushed_p2p_retry_max_seconds,json=serverPushedP2pRetryMaxSeconds,proto3" json:"server_pushed_p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetConfigResponse) Reset() { @@ -1398,6 +1456,62 @@ func (x *GetConfigResponse) GetSshJWTCacheTTL() int32 { return 0 } +func (x *GetConfigResponse) GetConnectionMode() string { + if x != nil { + return x.ConnectionMode + } + return "" +} + +func (x *GetConfigResponse) GetP2PTimeoutSeconds() uint32 { + if x != nil { + return x.P2PTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetRelayTimeoutSeconds() uint32 { + if x != nil { + return x.RelayTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.P2PRetryMaxSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetServerPushedConnectionMode() string { + if x != nil { + return x.ServerPushedConnectionMode + } + return "" +} + +func (x *GetConfigResponse) GetServerPushedRelayTimeoutSeconds() uint32 { + if x != nil { + return x.ServerPushedRelayTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) GetServerPushedP2PTimeoutSeconds() uint32 { + if x != nil { + return x.ServerPushedP2PTimeoutSeconds + } + return 0 +} + +func (x *GetConfigResponse) 
GetServerPushedP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.ServerPushedP2PRetryMaxSeconds + } + return 0 +} + // PeerState contains the latest state of a peer type PeerState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1419,6 +1533,22 @@ type PeerState struct { Latency *durationpb.Duration `protobuf:"bytes,17,opt,name=latency,proto3" json:"latency,omitempty"` RelayAddress string `protobuf:"bytes,18,opt,name=relayAddress,proto3" json:"relayAddress,omitempty"` SshHostKey []byte `protobuf:"bytes,19,opt,name=sshHostKey,proto3" json:"sshHostKey,omitempty"` + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int32 `protobuf:"varint,20,opt,name=iceBackoffFailures,proto3" json:"iceBackoffFailures,omitempty"` + IceBackoffNextRetry *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=iceBackoffNextRetry,proto3" json:"iceBackoffNextRetry,omitempty"` + IceBackoffSuspended bool `protobuf:"varint,22,opt,name=iceBackoffSuspended,proto3" json:"iceBackoffSuspended,omitempty"` + // Phase 3.7i (#5989): per-peer enrichment from RemotePeerConfig. 
+ ServerOnline bool `protobuf:"varint,30,opt,name=server_online,json=serverOnline,proto3" json:"server_online,omitempty"` + LastSeenAtServer *timestamppb.Timestamp `protobuf:"bytes,31,opt,name=last_seen_at_server,json=lastSeenAtServer,proto3" json:"last_seen_at_server,omitempty"` + Groups []string `protobuf:"bytes,32,rep,name=groups,proto3" json:"groups,omitempty"` + EffectiveConnectionMode string `protobuf:"bytes,33,opt,name=effective_connection_mode,json=effectiveConnectionMode,proto3" json:"effective_connection_mode,omitempty"` + ConfiguredConnectionMode string `protobuf:"bytes,34,opt,name=configured_connection_mode,json=configuredConnectionMode,proto3" json:"configured_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `protobuf:"varint,35,opt,name=effective_relay_timeout_secs,json=effectiveRelayTimeoutSecs,proto3" json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `protobuf:"varint,36,opt,name=effective_p2p_timeout_secs,json=effectiveP2pTimeoutSecs,proto3" json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `protobuf:"varint,37,opt,name=effective_p2p_retry_max_secs,json=effectiveP2pRetryMaxSecs,proto3" json:"effective_p2p_retry_max_secs,omitempty"` + ConfiguredRelayTimeoutSecs uint32 `protobuf:"varint,38,opt,name=configured_relay_timeout_secs,json=configuredRelayTimeoutSecs,proto3" json:"configured_relay_timeout_secs,omitempty"` + ConfiguredP2PTimeoutSecs uint32 `protobuf:"varint,39,opt,name=configured_p2p_timeout_secs,json=configuredP2pTimeoutSecs,proto3" json:"configured_p2p_timeout_secs,omitempty"` + ConfiguredP2PRetryMaxSecs uint32 `protobuf:"varint,40,opt,name=configured_p2p_retry_max_secs,json=configuredP2pRetryMaxSecs,proto3" json:"configured_p2p_retry_max_secs,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1579,6 +1709,104 @@ func (x *PeerState) GetSshHostKey() []byte { return nil } +func (x *PeerState) GetIceBackoffFailures() int32 { + if x != 
nil { + return x.IceBackoffFailures + } + return 0 +} + +func (x *PeerState) GetIceBackoffNextRetry() *timestamppb.Timestamp { + if x != nil { + return x.IceBackoffNextRetry + } + return nil +} + +func (x *PeerState) GetIceBackoffSuspended() bool { + if x != nil { + return x.IceBackoffSuspended + } + return false +} + +func (x *PeerState) GetServerOnline() bool { + if x != nil { + return x.ServerOnline + } + return false +} + +func (x *PeerState) GetLastSeenAtServer() *timestamppb.Timestamp { + if x != nil { + return x.LastSeenAtServer + } + return nil +} + +func (x *PeerState) GetGroups() []string { + if x != nil { + return x.Groups + } + return nil +} + +func (x *PeerState) GetEffectiveConnectionMode() string { + if x != nil { + return x.EffectiveConnectionMode + } + return "" +} + +func (x *PeerState) GetConfiguredConnectionMode() string { + if x != nil { + return x.ConfiguredConnectionMode + } + return "" +} + +func (x *PeerState) GetEffectiveRelayTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveRelayTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetEffectiveP2PTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveP2PTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetEffectiveP2PRetryMaxSecs() uint32 { + if x != nil { + return x.EffectiveP2PRetryMaxSecs + } + return 0 +} + +func (x *PeerState) GetConfiguredRelayTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredRelayTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetConfiguredP2PTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredP2PTimeoutSecs + } + return 0 +} + +func (x *PeerState) GetConfiguredP2PRetryMaxSecs() uint32 { + if x != nil { + return x.ConfiguredP2PRetryMaxSecs + } + return 0 +} + // LocalPeerState contains the latest state of the local peer type LocalPeerState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2066,8 +2294,15 @@ type FullStatus struct { Events []*SystemEvent `protobuf:"bytes,7,rep,name=events,proto3" 
json:"events,omitempty"` LazyConnectionEnabled bool `protobuf:"varint,9,opt,name=lazyConnectionEnabled,proto3" json:"lazyConnectionEnabled,omitempty"` SshServerState *SSHServerState `protobuf:"bytes,10,opt,name=sshServerState,proto3" json:"sshServerState,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 3.7i (#5989): aggregate counters so UIs don't re-derive them. + ConfiguredPeersTotal uint32 `protobuf:"varint,50,opt,name=configured_peers_total,json=configuredPeersTotal,proto3" json:"configured_peers_total,omitempty"` + ServerOnlinePeers uint32 `protobuf:"varint,51,opt,name=server_online_peers,json=serverOnlinePeers,proto3" json:"server_online_peers,omitempty"` + P2PConnectedPeers uint32 `protobuf:"varint,52,opt,name=p2p_connected_peers,json=p2pConnectedPeers,proto3" json:"p2p_connected_peers,omitempty"` + RelayedConnectedPeers uint32 `protobuf:"varint,53,opt,name=relayed_connected_peers,json=relayedConnectedPeers,proto3" json:"relayed_connected_peers,omitempty"` + IdleOnlinePeers uint32 `protobuf:"varint,54,opt,name=idle_online_peers,json=idleOnlinePeers,proto3" json:"idle_online_peers,omitempty"` + ServerOfflinePeers uint32 `protobuf:"varint,55,opt,name=server_offline_peers,json=serverOfflinePeers,proto3" json:"server_offline_peers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FullStatus) Reset() { @@ -2170,6 +2405,48 @@ func (x *FullStatus) GetSshServerState() *SSHServerState { return nil } +func (x *FullStatus) GetConfiguredPeersTotal() uint32 { + if x != nil { + return x.ConfiguredPeersTotal + } + return 0 +} + +func (x *FullStatus) GetServerOnlinePeers() uint32 { + if x != nil { + return x.ServerOnlinePeers + } + return 0 +} + +func (x *FullStatus) GetP2PConnectedPeers() uint32 { + if x != nil { + return x.P2PConnectedPeers + } + return 0 +} + +func (x *FullStatus) GetRelayedConnectedPeers() uint32 { + if x != nil { + return x.RelayedConnectedPeers + } + return 0 +} 
+ +func (x *FullStatus) GetIdleOnlinePeers() uint32 { + if x != nil { + return x.IdleOnlinePeers + } + return 0 +} + +func (x *FullStatus) GetServerOfflinePeers() uint32 { + if x != nil { + return x.ServerOfflinePeers + } + return 0 +} + // Networks type ListNetworksRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -4009,8 +4286,18 @@ type SetConfigRequest struct { EnableSSHRemotePortForwarding *bool `protobuf:"varint,32,opt,name=enableSSHRemotePortForwarding,proto3,oneof" json:"enableSSHRemotePortForwarding,omitempty"` DisableSSHAuth *bool `protobuf:"varint,33,opt,name=disableSSHAuth,proto3,oneof" json:"disableSSHAuth,omitempty"` SshJWTCacheTTL *int32 `protobuf:"varint,34,opt,name=sshJWTCacheTTL,proto3,oneof" json:"sshJWTCacheTTL,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + ConnectionMode *string `protobuf:"bytes,40,opt,name=connection_mode,json=connectionMode,proto3,oneof" json:"connection_mode,omitempty"` + P2PTimeoutSeconds *uint32 `protobuf:"varint,41,opt,name=p2p_timeout_seconds,json=p2pTimeoutSeconds,proto3,oneof" json:"p2p_timeout_seconds,omitempty"` + RelayTimeoutSeconds *uint32 `protobuf:"varint,42,opt,name=relay_timeout_seconds,json=relayTimeoutSeconds,proto3,oneof" json:"relay_timeout_seconds,omitempty"` + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). 
+ P2PRetryMaxSeconds *uint32 `protobuf:"varint,43,opt,name=p2p_retry_max_seconds,json=p2pRetryMaxSeconds,proto3,oneof" json:"p2p_retry_max_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SetConfigRequest) Reset() { @@ -4281,6 +4568,34 @@ func (x *SetConfigRequest) GetSshJWTCacheTTL() int32 { return 0 } +func (x *SetConfigRequest) GetConnectionMode() string { + if x != nil && x.ConnectionMode != nil { + return *x.ConnectionMode + } + return "" +} + +func (x *SetConfigRequest) GetP2PTimeoutSeconds() uint32 { + if x != nil && x.P2PTimeoutSeconds != nil { + return *x.P2PTimeoutSeconds + } + return 0 +} + +func (x *SetConfigRequest) GetRelayTimeoutSeconds() uint32 { + if x != nil && x.RelayTimeoutSeconds != nil { + return *x.RelayTimeoutSeconds + } + return 0 +} + +func (x *SetConfigRequest) GetP2PRetryMaxSeconds() uint32 { + if x != nil && x.P2PRetryMaxSeconds != nil { + return *x.P2PRetryMaxSeconds + } + return 0 +} + type SetConfigResponse struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -6186,7 +6501,7 @@ var File_daemon_proto protoreflect.FileDescriptor const file_daemon_proto_rawDesc = "" + "\n" + "\fdaemon.proto\x12\x06daemon\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"\x0e\n" + - "\fEmptyRequest\"\xb6\x12\n" + + "\fEmptyRequest\"\xea\x14\n" + "\fLoginRequest\x12\x1a\n" + "\bsetupKey\x18\x01 \x01(\tR\bsetupKey\x12&\n" + "\fpreSharedKey\x18\x02 \x01(\tB\x02\x18\x01R\fpreSharedKey\x12$\n" + @@ -6230,7 +6545,11 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18$ \x01(\bH\x17R\x1cenableSSHLocalPortForwarding\x88\x01\x01\x12I\n" + "\x1denableSSHRemotePortForwarding\x18% \x01(\bH\x18R\x1denableSSHRemotePortForwarding\x88\x01\x01\x12+\n" + "\x0edisableSSHAuth\x18& \x01(\bH\x19R\x0edisableSSHAuth\x88\x01\x01\x12+\n" + - "\x0esshJWTCacheTTL\x18' 
\x01(\x05H\x1aR\x0esshJWTCacheTTL\x88\x01\x01B\x13\n" + + "\x0esshJWTCacheTTL\x18' \x01(\x05H\x1aR\x0esshJWTCacheTTL\x88\x01\x01\x12,\n" + + "\x0fconnection_mode\x18( \x01(\tH\x1bR\x0econnectionMode\x88\x01\x01\x123\n" + + "\x13p2p_timeout_seconds\x18) \x01(\rH\x1cR\x11p2pTimeoutSeconds\x88\x01\x01\x127\n" + + "\x15relay_timeout_seconds\x18* \x01(\rH\x1dR\x13relayTimeoutSeconds\x88\x01\x01\x126\n" + + "\x15p2p_retry_max_seconds\x18+ \x01(\rH\x1eR\x12p2pRetryMaxSeconds\x88\x01\x01B\x13\n" + "\x11_rosenpassEnabledB\x10\n" + "\x0e_interfaceNameB\x10\n" + "\x0e_wireguardPortB\x17\n" + @@ -6257,7 +6576,11 @@ const file_daemon_proto_rawDesc = "" + "\x1d_enableSSHLocalPortForwardingB \n" + "\x1e_enableSSHRemotePortForwardingB\x11\n" + "\x0f_disableSSHAuthB\x11\n" + - "\x0f_sshJWTCacheTTL\"\xb5\x01\n" + + "\x0f_sshJWTCacheTTLB\x12\n" + + "\x10_connection_modeB\x16\n" + + "\x14_p2p_timeout_secondsB\x18\n" + + "\x16_relay_timeout_secondsB\x18\n" + + "\x16_p2p_retry_max_seconds\"\xb5\x01\n" + "\rLoginResponse\x12$\n" + "\rneedsSSOLogin\x18\x01 \x01(\bR\rneedsSSOLogin\x12\x1a\n" + "\buserCode\x18\x02 \x01(\tR\buserCode\x12(\n" + @@ -6290,7 +6613,7 @@ const file_daemon_proto_rawDesc = "" + "\fDownResponse\"P\n" + "\x10GetConfigRequest\x12 \n" + "\vprofileName\x18\x01 \x01(\tR\vprofileName\x12\x1a\n" + - "\busername\x18\x02 \x01(\tR\busername\"\xdb\b\n" + + "\busername\x18\x02 \x01(\tR\busername\"\xc3\f\n" + "\x11GetConfigResponse\x12$\n" + "\rmanagementUrl\x18\x01 \x01(\tR\rmanagementUrl\x12\x1e\n" + "\n" + @@ -6321,7 +6644,15 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18\x16 \x01(\bR\x1cenableSSHLocalPortForwarding\x12D\n" + "\x1denableSSHRemotePortForwarding\x18\x17 \x01(\bR\x1denableSSHRemotePortForwarding\x12&\n" + "\x0edisableSSHAuth\x18\x19 \x01(\bR\x0edisableSSHAuth\x12&\n" + - "\x0esshJWTCacheTTL\x18\x1a \x01(\x05R\x0esshJWTCacheTTL\"\xfe\x05\n" + + "\x0esshJWTCacheTTL\x18\x1a \x01(\x05R\x0esshJWTCacheTTL\x12'\n" + + 
"\x0fconnection_mode\x18\x1b \x01(\tR\x0econnectionMode\x12.\n" + + "\x13p2p_timeout_seconds\x18\x1c \x01(\rR\x11p2pTimeoutSeconds\x122\n" + + "\x15relay_timeout_seconds\x18\x1d \x01(\rR\x13relayTimeoutSeconds\x121\n" + + "\x15p2p_retry_max_seconds\x18\x1e \x01(\rR\x12p2pRetryMaxSeconds\x12A\n" + + "\x1dserver_pushed_connection_mode\x18\x1f \x01(\tR\x1aserverPushedConnectionMode\x12L\n" + + "#server_pushed_relay_timeout_seconds\x18 \x01(\rR\x1fserverPushedRelayTimeoutSeconds\x12H\n" + + "!server_pushed_p2p_timeout_seconds\x18! \x01(\rR\x1dserverPushedP2pTimeoutSeconds\x12K\n" + + "#server_pushed_p2p_retry_max_seconds\x18\" \x01(\rR\x1eserverPushedP2pRetryMaxSeconds\"\xb2\f\n" + "\tPeerState\x12\x0e\n" + "\x02IP\x18\x01 \x01(\tR\x02IP\x12\x16\n" + "\x06pubKey\x18\x02 \x01(\tR\x06pubKey\x12\x1e\n" + @@ -6345,7 +6676,21 @@ const file_daemon_proto_rawDesc = "" + "\frelayAddress\x18\x12 \x01(\tR\frelayAddress\x12\x1e\n" + "\n" + "sshHostKey\x18\x13 \x01(\fR\n" + - "sshHostKey\"\xf0\x01\n" + + "sshHostKey\x12.\n" + + "\x12iceBackoffFailures\x18\x14 \x01(\x05R\x12iceBackoffFailures\x12L\n" + + "\x13iceBackoffNextRetry\x18\x15 \x01(\v2\x1a.google.protobuf.TimestampR\x13iceBackoffNextRetry\x120\n" + + "\x13iceBackoffSuspended\x18\x16 \x01(\bR\x13iceBackoffSuspended\x12#\n" + + "\rserver_online\x18\x1e \x01(\bR\fserverOnline\x12I\n" + + "\x13last_seen_at_server\x18\x1f \x01(\v2\x1a.google.protobuf.TimestampR\x10lastSeenAtServer\x12\x16\n" + + "\x06groups\x18 \x03(\tR\x06groups\x12:\n" + + "\x19effective_connection_mode\x18! 
\x01(\tR\x17effectiveConnectionMode\x12<\n" + + "\x1aconfigured_connection_mode\x18\" \x01(\tR\x18configuredConnectionMode\x12?\n" + + "\x1ceffective_relay_timeout_secs\x18# \x01(\rR\x19effectiveRelayTimeoutSecs\x12;\n" + + "\x1aeffective_p2p_timeout_secs\x18$ \x01(\rR\x17effectiveP2pTimeoutSecs\x12>\n" + + "\x1ceffective_p2p_retry_max_secs\x18% \x01(\rR\x18effectiveP2pRetryMaxSecs\x12A\n" + + "\x1dconfigured_relay_timeout_secs\x18& \x01(\rR\x1aconfiguredRelayTimeoutSecs\x12=\n" + + "\x1bconfigured_p2p_timeout_secs\x18' \x01(\rR\x18configuredP2pTimeoutSecs\x12@\n" + + "\x1dconfigured_p2p_retry_max_secs\x18( \x01(\rR\x19configuredP2pRetryMaxSecs\"\xf0\x01\n" + "\x0eLocalPeerState\x12\x0e\n" + "\x02IP\x18\x01 \x01(\tR\x02IP\x12\x16\n" + "\x06pubKey\x18\x02 \x01(\tR\x06pubKey\x12(\n" + @@ -6380,7 +6725,7 @@ const file_daemon_proto_rawDesc = "" + "\fportForwards\x18\x05 \x03(\tR\fportForwards\"^\n" + "\x0eSSHServerState\x12\x18\n" + "\aenabled\x18\x01 \x01(\bR\aenabled\x122\n" + - "\bsessions\x18\x02 \x03(\v2\x16.daemon.SSHSessionInfoR\bsessions\"\xaf\x04\n" + + "\bsessions\x18\x02 \x03(\v2\x16.daemon.SSHSessionInfoR\bsessions\"\xdb\x06\n" + "\n" + "FullStatus\x12A\n" + "\x0fmanagementState\x18\x01 \x01(\v2\x17.daemon.ManagementStateR\x0fmanagementState\x125\n" + @@ -6394,7 +6739,13 @@ const file_daemon_proto_rawDesc = "" + "\x06events\x18\a \x03(\v2\x13.daemon.SystemEventR\x06events\x124\n" + "\x15lazyConnectionEnabled\x18\t \x01(\bR\x15lazyConnectionEnabled\x12>\n" + "\x0esshServerState\x18\n" + - " \x01(\v2\x16.daemon.SSHServerStateR\x0esshServerState\"\x15\n" + + " \x01(\v2\x16.daemon.SSHServerStateR\x0esshServerState\x124\n" + + "\x16configured_peers_total\x182 \x01(\rR\x14configuredPeersTotal\x12.\n" + + "\x13server_online_peers\x183 \x01(\rR\x11serverOnlinePeers\x12.\n" + + "\x13p2p_connected_peers\x184 \x01(\rR\x11p2pConnectedPeers\x126\n" + + "\x17relayed_connected_peers\x185 \x01(\rR\x15relayedConnectedPeers\x12*\n" + + "\x11idle_online_peers\x186 
\x01(\rR\x0fidleOnlinePeers\x120\n" + + "\x14server_offline_peers\x187 \x01(\rR\x12serverOfflinePeers\"\x15\n" + "\x13ListNetworksRequest\"?\n" + "\x14ListNetworksResponse\x12'\n" + "\x06routes\x18\x01 \x03(\v2\x0f.daemon.NetworkR\x06routes\"a\n" + @@ -6534,7 +6885,7 @@ const file_daemon_proto_rawDesc = "" + "\busername\x18\x02 \x01(\tH\x01R\busername\x88\x01\x01B\x0e\n" + "\f_profileNameB\v\n" + "\t_username\"\x17\n" + - "\x15SwitchProfileResponse\"\xdf\x10\n" + + "\x15SwitchProfileResponse\"\x93\x13\n" + "\x10SetConfigRequest\x12\x1a\n" + "\busername\x18\x01 \x01(\tR\busername\x12 \n" + "\vprofileName\x18\x02 \x01(\tR\vprofileName\x12$\n" + @@ -6573,7 +6924,11 @@ const file_daemon_proto_rawDesc = "" + "\x1cenableSSHLocalPortForwarding\x18\x1f \x01(\bH\x14R\x1cenableSSHLocalPortForwarding\x88\x01\x01\x12I\n" + "\x1denableSSHRemotePortForwarding\x18 \x01(\bH\x15R\x1denableSSHRemotePortForwarding\x88\x01\x01\x12+\n" + "\x0edisableSSHAuth\x18! \x01(\bH\x16R\x0edisableSSHAuth\x88\x01\x01\x12+\n" + - "\x0esshJWTCacheTTL\x18\" \x01(\x05H\x17R\x0esshJWTCacheTTL\x88\x01\x01B\x13\n" + + "\x0esshJWTCacheTTL\x18\" \x01(\x05H\x17R\x0esshJWTCacheTTL\x88\x01\x01\x12,\n" + + "\x0fconnection_mode\x18( \x01(\tH\x18R\x0econnectionMode\x88\x01\x01\x123\n" + + "\x13p2p_timeout_seconds\x18) \x01(\rH\x19R\x11p2pTimeoutSeconds\x88\x01\x01\x127\n" + + "\x15relay_timeout_seconds\x18* \x01(\rH\x1aR\x13relayTimeoutSeconds\x88\x01\x01\x126\n" + + "\x15p2p_retry_max_seconds\x18+ \x01(\rH\x1bR\x12p2pRetryMaxSeconds\x88\x01\x01B\x13\n" + "\x11_rosenpassEnabledB\x10\n" + "\x0e_interfaceNameB\x10\n" + "\x0e_wireguardPortB\x17\n" + @@ -6597,7 +6952,11 @@ const file_daemon_proto_rawDesc = "" + "\x1d_enableSSHLocalPortForwardingB \n" + "\x1e_enableSSHRemotePortForwardingB\x11\n" + "\x0f_disableSSHAuthB\x11\n" + - "\x0f_sshJWTCacheTTL\"\x13\n" + + "\x0f_sshJWTCacheTTLB\x12\n" + + "\x10_connection_modeB\x16\n" + + "\x14_p2p_timeout_secondsB\x18\n" + + "\x16_relay_timeout_secondsB\x18\n" + + 
"\x16_p2p_retry_max_seconds\"\x13\n" + "\x11SetConfigResponse\"Q\n" + "\x11AddProfileRequest\x12\x1a\n" + "\busername\x18\x01 \x01(\tR\busername\x12 \n" + @@ -6896,121 +7255,123 @@ var file_daemon_proto_depIdxs = []int32{ 102, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp 102, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp 101, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration - 23, // 5: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo - 20, // 6: daemon.FullStatus.managementState:type_name -> daemon.ManagementState - 19, // 7: daemon.FullStatus.signalState:type_name -> daemon.SignalState - 18, // 8: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState - 17, // 9: daemon.FullStatus.peers:type_name -> daemon.PeerState - 21, // 10: daemon.FullStatus.relays:type_name -> daemon.RelayState - 22, // 11: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState - 55, // 12: daemon.FullStatus.events:type_name -> daemon.SystemEvent - 24, // 13: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState - 31, // 14: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 98, // 15: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 99, // 16: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range - 32, // 17: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo - 32, // 18: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo - 33, // 19: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule - 0, // 20: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel - 0, // 21: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel - 41, // 22: daemon.ListStatesResponse.states:type_name -> daemon.State - 50, // 23: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags - 52, // 24: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage - 
2, // 25: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity - 3, // 26: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 102, // 27: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 100, // 28: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry - 55, // 29: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 101, // 30: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 68, // 31: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile - 1, // 32: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol - 91, // 33: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady - 101, // 34: daemon.StartCaptureRequest.duration:type_name -> google.protobuf.Duration - 101, // 35: daemon.StartBundleCaptureRequest.timeout:type_name -> google.protobuf.Duration - 30, // 36: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList - 5, // 37: daemon.DaemonService.Login:input_type -> daemon.LoginRequest - 7, // 38: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest - 9, // 39: daemon.DaemonService.Up:input_type -> daemon.UpRequest - 11, // 40: daemon.DaemonService.Status:input_type -> daemon.StatusRequest - 13, // 41: daemon.DaemonService.Down:input_type -> daemon.DownRequest - 15, // 42: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest - 26, // 43: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest - 28, // 44: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest - 28, // 45: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest - 4, // 46: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest - 35, // 47: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest - 37, // 48: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest - 39, // 
49: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest - 42, // 50: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest - 44, // 51: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest - 46, // 52: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest - 48, // 53: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest - 51, // 54: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest - 92, // 55: daemon.DaemonService.StartCapture:input_type -> daemon.StartCaptureRequest - 94, // 56: daemon.DaemonService.StartBundleCapture:input_type -> daemon.StartBundleCaptureRequest - 96, // 57: daemon.DaemonService.StopBundleCapture:input_type -> daemon.StopBundleCaptureRequest - 54, // 58: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest - 56, // 59: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest - 58, // 60: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest - 60, // 61: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest - 62, // 62: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest - 64, // 63: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest - 66, // 64: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest - 69, // 65: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest - 71, // 66: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest - 73, // 67: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest - 75, // 68: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest - 77, // 69: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest - 79, // 70: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest - 81, // 71: 
daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 83, // 72: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest - 85, // 73: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest - 87, // 74: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 89, // 75: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest - 6, // 76: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 8, // 77: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 10, // 78: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 12, // 79: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 14, // 80: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 16, // 81: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 27, // 82: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 29, // 83: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 29, // 84: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 34, // 85: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 36, // 86: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 38, // 87: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 40, // 88: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 43, // 89: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 45, // 90: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 47, // 91: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 49, // 92: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 53, // 93: daemon.DaemonService.TracePacket:output_type -> 
daemon.TracePacketResponse - 93, // 94: daemon.DaemonService.StartCapture:output_type -> daemon.CapturePacket - 95, // 95: daemon.DaemonService.StartBundleCapture:output_type -> daemon.StartBundleCaptureResponse - 97, // 96: daemon.DaemonService.StopBundleCapture:output_type -> daemon.StopBundleCaptureResponse - 55, // 97: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 57, // 98: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 59, // 99: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 61, // 100: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 63, // 101: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 65, // 102: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 67, // 103: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 70, // 104: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 72, // 105: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 74, // 106: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 76, // 107: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse - 78, // 108: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 80, // 109: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 82, // 110: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 84, // 111: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse - 86, // 112: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse - 88, // 113: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 90, // 114: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent - 76, // [76:115] is the sub-list for 
method output_type - 37, // [37:76] is the sub-list for method input_type - 37, // [37:37] is the sub-list for extension type_name - 37, // [37:37] is the sub-list for extension extendee - 0, // [0:37] is the sub-list for field type_name + 102, // 5: daemon.PeerState.iceBackoffNextRetry:type_name -> google.protobuf.Timestamp + 102, // 6: daemon.PeerState.last_seen_at_server:type_name -> google.protobuf.Timestamp + 23, // 7: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo + 20, // 8: daemon.FullStatus.managementState:type_name -> daemon.ManagementState + 19, // 9: daemon.FullStatus.signalState:type_name -> daemon.SignalState + 18, // 10: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState + 17, // 11: daemon.FullStatus.peers:type_name -> daemon.PeerState + 21, // 12: daemon.FullStatus.relays:type_name -> daemon.RelayState + 22, // 13: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState + 55, // 14: daemon.FullStatus.events:type_name -> daemon.SystemEvent + 24, // 15: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState + 31, // 16: daemon.ListNetworksResponse.routes:type_name -> daemon.Network + 98, // 17: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 99, // 18: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 32, // 19: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo + 32, // 20: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo + 33, // 21: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule + 0, // 22: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel + 0, // 23: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel + 41, // 24: daemon.ListStatesResponse.states:type_name -> daemon.State + 50, // 25: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags + 52, // 26: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage + 2, // 27: daemon.SystemEvent.severity:type_name -> 
daemon.SystemEvent.Severity + 3, // 28: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category + 102, // 29: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 100, // 30: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 55, // 31: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent + 101, // 32: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 68, // 33: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile + 1, // 34: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol + 91, // 35: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady + 101, // 36: daemon.StartCaptureRequest.duration:type_name -> google.protobuf.Duration + 101, // 37: daemon.StartBundleCaptureRequest.timeout:type_name -> google.protobuf.Duration + 30, // 38: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList + 5, // 39: daemon.DaemonService.Login:input_type -> daemon.LoginRequest + 7, // 40: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest + 9, // 41: daemon.DaemonService.Up:input_type -> daemon.UpRequest + 11, // 42: daemon.DaemonService.Status:input_type -> daemon.StatusRequest + 13, // 43: daemon.DaemonService.Down:input_type -> daemon.DownRequest + 15, // 44: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest + 26, // 45: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest + 28, // 46: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest + 28, // 47: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest + 4, // 48: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest + 35, // 49: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest + 37, // 50: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest + 39, // 51: daemon.DaemonService.SetLogLevel:input_type -> 
daemon.SetLogLevelRequest + 42, // 52: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest + 44, // 53: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest + 46, // 54: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest + 48, // 55: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest + 51, // 56: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest + 92, // 57: daemon.DaemonService.StartCapture:input_type -> daemon.StartCaptureRequest + 94, // 58: daemon.DaemonService.StartBundleCapture:input_type -> daemon.StartBundleCaptureRequest + 96, // 59: daemon.DaemonService.StopBundleCapture:input_type -> daemon.StopBundleCaptureRequest + 54, // 60: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest + 56, // 61: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest + 58, // 62: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest + 60, // 63: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest + 62, // 64: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest + 64, // 65: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest + 66, // 66: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest + 69, // 67: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest + 71, // 68: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest + 73, // 69: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest + 75, // 70: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest + 77, // 71: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest + 79, // 72: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest + 81, // 73: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest + 
83, // 74: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 85, // 75: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 87, // 76: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 89, // 77: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest + 6, // 78: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 8, // 79: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 10, // 80: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 12, // 81: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 14, // 82: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 16, // 83: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 27, // 84: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 29, // 85: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 29, // 86: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 34, // 87: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 36, // 88: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 38, // 89: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 40, // 90: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 43, // 91: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 45, // 92: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 47, // 93: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 49, // 94: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 53, // 95: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 93, // 96: 
daemon.DaemonService.StartCapture:output_type -> daemon.CapturePacket + 95, // 97: daemon.DaemonService.StartBundleCapture:output_type -> daemon.StartBundleCaptureResponse + 97, // 98: daemon.DaemonService.StopBundleCapture:output_type -> daemon.StopBundleCaptureResponse + 55, // 99: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 57, // 100: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 59, // 101: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 61, // 102: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 63, // 103: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 65, // 104: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 67, // 105: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 70, // 106: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 72, // 107: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 74, // 108: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 76, // 109: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse + 78, // 110: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 80, // 111: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 82, // 112: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 84, // 113: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 86, // 114: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 88, // 115: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 90, // 116: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent + 78, // [78:117] is the sub-list for method output_type + 39, // [39:78] is the 
sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_daemon_proto_init() } diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 3fee9eca82d..02110068ec3 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -204,6 +204,18 @@ message LoginRequest { optional bool enableSSHRemotePortForwarding = 37; optional bool disableSSHAuth = 38; optional int32 sshJWTCacheTTL = 39; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + optional string connection_mode = 40; + optional uint32 p2p_timeout_seconds = 41; + optional uint32 relay_timeout_seconds = 42; + + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + optional uint32 p2p_retry_max_seconds = 43; } message LoginResponse { @@ -311,6 +323,28 @@ message GetConfigResponse { bool disableSSHAuth = 25; int32 sshJWTCacheTTL = 26; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is the canonical lower-kebab-case name + // (relay-forced, p2p, p2p-lazy, p2p-dynamic, follow-server) or + // empty string when no local override is set. + string connection_mode = 27; + uint32 p2p_timeout_seconds = 28; + uint32 relay_timeout_seconds = 29; + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = no + // local override (daemon falls back to server-pushed value or built-in + // 15-min default). + uint32 p2p_retry_max_seconds = 30; + + // Phase 3.7h: values most recently pushed by the management server's + // PeerConfig (independent of any local override). 
The UI surfaces + // these so users can see what "Follow server" would inherit and which + // numeric defaults the empty-entry overrides fall back to. All four + // are 0/empty when the engine has not received PeerConfig yet. + string server_pushed_connection_mode = 31; + uint32 server_pushed_relay_timeout_seconds = 32; + uint32 server_pushed_p2p_timeout_seconds = 33; + uint32 server_pushed_p2p_retry_max_seconds = 34; } // PeerState contains the latest state of a peer @@ -333,6 +367,22 @@ message PeerState { google.protobuf.Duration latency = 17; string relayAddress = 18; bytes sshHostKey = 19; + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + int32 iceBackoffFailures = 20; + google.protobuf.Timestamp iceBackoffNextRetry = 21; + bool iceBackoffSuspended = 22; + // Phase 3.7i (#5989): per-peer enrichment from RemotePeerConfig. + bool server_online = 30; + google.protobuf.Timestamp last_seen_at_server = 31; + repeated string groups = 32; + string effective_connection_mode = 33; + string configured_connection_mode = 34; + uint32 effective_relay_timeout_secs = 35; + uint32 effective_p2p_timeout_secs = 36; + uint32 effective_p2p_retry_max_secs = 37; + uint32 configured_relay_timeout_secs = 38; + uint32 configured_p2p_timeout_secs = 39; + uint32 configured_p2p_retry_max_secs = 40; } // LocalPeerState contains the latest state of the local peer @@ -403,6 +453,13 @@ message FullStatus { bool lazyConnectionEnabled = 9; SSHServerState sshServerState = 10; + // Phase 3.7i (#5989): aggregate counters so UIs don't re-derive them. 
+ uint32 configured_peers_total = 50; + uint32 server_online_peers = 51; + uint32 p2p_connected_peers = 52; + uint32 relayed_connected_peers = 53; + uint32 idle_online_peers = 54; + uint32 server_offline_peers = 55; } // Networks @@ -672,6 +729,18 @@ message SetConfigRequest { optional bool enableSSHRemotePortForwarding = 32; optional bool disableSSHAuth = 33; optional int32 sshJWTCacheTTL = 34; + + // Phase 1 (#5989): peer-connection mode and idle timeouts. + // connection_mode is a string (relay-forced, p2p, p2p-lazy, p2p-dynamic, + // follow-server, or empty); parsed via connectionmode.ParseString at the + // daemon side. Empty means "no client-side override, use server value". + optional string connection_mode = 40; + optional uint32 p2p_timeout_seconds = 41; + optional uint32 relay_timeout_seconds = 42; + + // Phase 3 (#5989): cap on the ICE-failure backoff schedule. 0 = use + // server-pushed value (or built-in default 15 min). + optional uint32 p2p_retry_max_seconds = 43; } message SetConfigResponse{} diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index 66a8efcc325..d5c16ac56f5 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.6.1 -// - protoc v6.33.1 -// source: daemon.proto package proto @@ -15,50 +11,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. 
-const _ = grpc.SupportPackageIsVersion9 - -const ( - DaemonService_Login_FullMethodName = "/daemon.DaemonService/Login" - DaemonService_WaitSSOLogin_FullMethodName = "/daemon.DaemonService/WaitSSOLogin" - DaemonService_Up_FullMethodName = "/daemon.DaemonService/Up" - DaemonService_Status_FullMethodName = "/daemon.DaemonService/Status" - DaemonService_Down_FullMethodName = "/daemon.DaemonService/Down" - DaemonService_GetConfig_FullMethodName = "/daemon.DaemonService/GetConfig" - DaemonService_ListNetworks_FullMethodName = "/daemon.DaemonService/ListNetworks" - DaemonService_SelectNetworks_FullMethodName = "/daemon.DaemonService/SelectNetworks" - DaemonService_DeselectNetworks_FullMethodName = "/daemon.DaemonService/DeselectNetworks" - DaemonService_ForwardingRules_FullMethodName = "/daemon.DaemonService/ForwardingRules" - DaemonService_DebugBundle_FullMethodName = "/daemon.DaemonService/DebugBundle" - DaemonService_GetLogLevel_FullMethodName = "/daemon.DaemonService/GetLogLevel" - DaemonService_SetLogLevel_FullMethodName = "/daemon.DaemonService/SetLogLevel" - DaemonService_ListStates_FullMethodName = "/daemon.DaemonService/ListStates" - DaemonService_CleanState_FullMethodName = "/daemon.DaemonService/CleanState" - DaemonService_DeleteState_FullMethodName = "/daemon.DaemonService/DeleteState" - DaemonService_SetSyncResponsePersistence_FullMethodName = "/daemon.DaemonService/SetSyncResponsePersistence" - DaemonService_TracePacket_FullMethodName = "/daemon.DaemonService/TracePacket" - DaemonService_StartCapture_FullMethodName = "/daemon.DaemonService/StartCapture" - DaemonService_StartBundleCapture_FullMethodName = "/daemon.DaemonService/StartBundleCapture" - DaemonService_StopBundleCapture_FullMethodName = "/daemon.DaemonService/StopBundleCapture" - DaemonService_SubscribeEvents_FullMethodName = "/daemon.DaemonService/SubscribeEvents" - DaemonService_GetEvents_FullMethodName = "/daemon.DaemonService/GetEvents" - DaemonService_SwitchProfile_FullMethodName = 
"/daemon.DaemonService/SwitchProfile" - DaemonService_SetConfig_FullMethodName = "/daemon.DaemonService/SetConfig" - DaemonService_AddProfile_FullMethodName = "/daemon.DaemonService/AddProfile" - DaemonService_RemoveProfile_FullMethodName = "/daemon.DaemonService/RemoveProfile" - DaemonService_ListProfiles_FullMethodName = "/daemon.DaemonService/ListProfiles" - DaemonService_GetActiveProfile_FullMethodName = "/daemon.DaemonService/GetActiveProfile" - DaemonService_Logout_FullMethodName = "/daemon.DaemonService/Logout" - DaemonService_GetFeatures_FullMethodName = "/daemon.DaemonService/GetFeatures" - DaemonService_TriggerUpdate_FullMethodName = "/daemon.DaemonService/TriggerUpdate" - DaemonService_GetPeerSSHHostKey_FullMethodName = "/daemon.DaemonService/GetPeerSSHHostKey" - DaemonService_RequestJWTAuth_FullMethodName = "/daemon.DaemonService/RequestJWTAuth" - DaemonService_WaitJWTToken_FullMethodName = "/daemon.DaemonService/WaitJWTToken" - DaemonService_StartCPUProfile_FullMethodName = "/daemon.DaemonService/StartCPUProfile" - DaemonService_StopCPUProfile_FullMethodName = "/daemon.DaemonService/StopCPUProfile" - DaemonService_GetInstallerResult_FullMethodName = "/daemon.DaemonService/GetInstallerResult" - DaemonService_ExposeService_FullMethodName = "/daemon.DaemonService/ExposeService" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // DaemonServiceClient is the client API for DaemonService service. // @@ -101,13 +55,13 @@ type DaemonServiceClient interface { TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) // StartCapture begins streaming packet capture on the WireGuard interface. // Requires --enable-capture set at service install/reconfigure time. 
- StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) + StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (DaemonService_StartCaptureClient, error) // StartBundleCapture begins capturing packets to a server-side temp file // for inclusion in the next debug bundle. Auto-stops after the given timeout. StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) // StopBundleCapture stops the running bundle capture. Idempotent. StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) - SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) + SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) @@ -133,7 +87,7 @@ type DaemonServiceClient interface { StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) + ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) } type 
daemonServiceClient struct { @@ -145,9 +99,8 @@ func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { } func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LoginResponse) - err := c.cc.Invoke(ctx, DaemonService_Login_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Login", in, out, opts...) if err != nil { return nil, err } @@ -155,9 +108,8 @@ func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts } func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLoginRequest, opts ...grpc.CallOption) (*WaitSSOLoginResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitSSOLoginResponse) - err := c.cc.Invoke(ctx, DaemonService_WaitSSOLogin_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitSSOLogin", in, out, opts...) if err != nil { return nil, err } @@ -165,9 +117,8 @@ func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLogin } func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grpc.CallOption) (*UpResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(UpResponse) - err := c.cc.Invoke(ctx, DaemonService_Up_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Up", in, out, opts...) if err != nil { return nil, err } @@ -175,9 +126,8 @@ func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grp } func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StatusResponse) - err := c.cc.Invoke(ctx, DaemonService_Status_FullMethodName, in, out, cOpts...) 
+ err := c.cc.Invoke(ctx, "/daemon.DaemonService/Status", in, out, opts...) if err != nil { return nil, err } @@ -185,9 +135,8 @@ func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opt } func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (*DownResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DownResponse) - err := c.cc.Invoke(ctx, DaemonService_Down_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Down", in, out, opts...) if err != nil { return nil, err } @@ -195,9 +144,8 @@ func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts .. } func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetConfigResponse) - err := c.cc.Invoke(ctx, DaemonService_GetConfig_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetConfig", in, out, opts...) if err != nil { return nil, err } @@ -205,9 +153,8 @@ func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigReques } func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_ListNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListNetworks", in, out, opts...) 
if err != nil { return nil, err } @@ -215,9 +162,8 @@ func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworks } func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_SelectNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SelectNetworks", in, out, opts...) if err != nil { return nil, err } @@ -225,9 +171,8 @@ func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetw } func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, DaemonService_DeselectNetworks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeselectNetworks", in, out, opts...) if err != nil { return nil, err } @@ -235,9 +180,8 @@ func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNe } func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*ForwardingRulesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ForwardingRulesResponse) - err := c.cc.Invoke(ctx, DaemonService_ForwardingRules_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ForwardingRules", in, out, opts...) 
if err != nil { return nil, err } @@ -245,9 +189,8 @@ func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequ } func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRequest, opts ...grpc.CallOption) (*DebugBundleResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DebugBundleResponse) - err := c.cc.Invoke(ctx, DaemonService_DebugBundle_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DebugBundle", in, out, opts...) if err != nil { return nil, err } @@ -255,9 +198,8 @@ func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRe } func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRequest, opts ...grpc.CallOption) (*GetLogLevelResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetLogLevelResponse) - err := c.cc.Invoke(ctx, DaemonService_GetLogLevel_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetLogLevel", in, out, opts...) if err != nil { return nil, err } @@ -265,9 +207,8 @@ func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRe } func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRequest, opts ...grpc.CallOption) (*SetLogLevelResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetLogLevelResponse) - err := c.cc.Invoke(ctx, DaemonService_SetLogLevel_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetLogLevel", in, out, opts...) if err != nil { return nil, err } @@ -275,9 +216,8 @@ func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRe } func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequest, opts ...grpc.CallOption) (*ListStatesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(ListStatesResponse) - err := c.cc.Invoke(ctx, DaemonService_ListStates_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListStates", in, out, opts...) if err != nil { return nil, err } @@ -285,9 +225,8 @@ func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequ } func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequest, opts ...grpc.CallOption) (*CleanStateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CleanStateResponse) - err := c.cc.Invoke(ctx, DaemonService_CleanState_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/CleanState", in, out, opts...) if err != nil { return nil, err } @@ -295,9 +234,8 @@ func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequ } func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*DeleteStateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteStateResponse) - err := c.cc.Invoke(ctx, DaemonService_DeleteState_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeleteState", in, out, opts...) if err != nil { return nil, err } @@ -305,9 +243,8 @@ func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRe } func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in *SetSyncResponsePersistenceRequest, opts ...grpc.CallOption) (*SetSyncResponsePersistenceResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetSyncResponsePersistenceResponse) - err := c.cc.Invoke(ctx, DaemonService_SetSyncResponsePersistence_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetSyncResponsePersistence", in, out, opts...) 
if err != nil { return nil, err } @@ -315,22 +252,20 @@ func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in } func (c *daemonServiceClient) TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TracePacketResponse) - err := c.cc.Invoke(ctx, DaemonService_TracePacket_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TracePacket", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_StartCapture_FullMethodName, cOpts...) +func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (DaemonService_StartCaptureClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], "/daemon.DaemonService/StartCapture", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[StartCaptureRequest, CapturePacket]{ClientStream: stream} + x := &daemonServiceStartCaptureClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -340,13 +275,26 @@ func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCapture return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type DaemonService_StartCaptureClient = grpc.ServerStreamingClient[CapturePacket] +type DaemonService_StartCaptureClient interface { + Recv() (*CapturePacket, error) + grpc.ClientStream +} + +type daemonServiceStartCaptureClient struct { + grpc.ClientStream +} + +func (x *daemonServiceStartCaptureClient) Recv() (*CapturePacket, error) { + m := new(CapturePacket) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *daemonServiceClient) StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StartBundleCaptureResponse) - err := c.cc.Invoke(ctx, DaemonService_StartBundleCapture_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartBundleCapture", in, out, opts...) if err != nil { return nil, err } @@ -354,22 +302,20 @@ func (c *daemonServiceClient) StartBundleCapture(ctx context.Context, in *StartB } func (c *daemonServiceClient) StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StopBundleCaptureResponse) - err := c.cc.Invoke(ctx, DaemonService_StopBundleCapture_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopBundleCapture", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], DaemonService_SubscribeEvents_FullMethodName, cOpts...) 
+func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], "/daemon.DaemonService/SubscribeEvents", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[SubscribeRequest, SystemEvent]{ClientStream: stream} + x := &daemonServiceSubscribeEventsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -379,13 +325,26 @@ func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *Subscribe return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type DaemonService_SubscribeEventsClient = grpc.ServerStreamingClient[SystemEvent] +type DaemonService_SubscribeEventsClient interface { + Recv() (*SystemEvent, error) + grpc.ClientStream +} + +type daemonServiceSubscribeEventsClient struct { + grpc.ClientStream +} + +func (x *daemonServiceSubscribeEventsClient) Recv() (*SystemEvent, error) { + m := new(SystemEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetEventsResponse) - err := c.cc.Invoke(ctx, DaemonService_GetEvents_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetEvents", in, out, opts...) 
if err != nil { return nil, err } @@ -393,9 +352,8 @@ func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsReques } func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SwitchProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_SwitchProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SwitchProfile", in, out, opts...) if err != nil { return nil, err } @@ -403,9 +361,8 @@ func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfi } func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetConfigResponse) - err := c.cc.Invoke(ctx, DaemonService_SetConfig_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetConfig", in, out, opts...) if err != nil { return nil, err } @@ -413,9 +370,8 @@ func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigReques } func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequest, opts ...grpc.CallOption) (*AddProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AddProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_AddProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/AddProfile", in, out, opts...) if err != nil { return nil, err } @@ -423,9 +379,8 @@ func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequ } func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfileRequest, opts ...grpc.CallOption) (*RemoveProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(RemoveProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_RemoveProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/RemoveProfile", in, out, opts...) if err != nil { return nil, err } @@ -433,9 +388,8 @@ func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfi } func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfilesRequest, opts ...grpc.CallOption) (*ListProfilesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListProfilesResponse) - err := c.cc.Invoke(ctx, DaemonService_ListProfiles_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListProfiles", in, out, opts...) if err != nil { return nil, err } @@ -443,9 +397,8 @@ func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfiles } func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiveProfileRequest, opts ...grpc.CallOption) (*GetActiveProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetActiveProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_GetActiveProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetActiveProfile", in, out, opts...) if err != nil { return nil, err } @@ -453,9 +406,8 @@ func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiv } func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LogoutResponse) - err := c.cc.Invoke(ctx, DaemonService_Logout_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/Logout", in, out, opts...) 
if err != nil { return nil, err } @@ -463,9 +415,8 @@ func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opt } func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRequest, opts ...grpc.CallOption) (*GetFeaturesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetFeaturesResponse) - err := c.cc.Invoke(ctx, DaemonService_GetFeatures_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetFeatures", in, out, opts...) if err != nil { return nil, err } @@ -473,9 +424,8 @@ func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRe } func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TriggerUpdateResponse) - err := c.cc.Invoke(ctx, DaemonService_TriggerUpdate_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TriggerUpdate", in, out, opts...) if err != nil { return nil, err } @@ -483,9 +433,8 @@ func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpda } func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetPeerSSHHostKeyResponse) - err := c.cc.Invoke(ctx, DaemonService_GetPeerSSHHostKey_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetPeerSSHHostKey", in, out, opts...) 
if err != nil { return nil, err } @@ -493,9 +442,8 @@ func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeer } func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWTAuthRequest, opts ...grpc.CallOption) (*RequestJWTAuthResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RequestJWTAuthResponse) - err := c.cc.Invoke(ctx, DaemonService_RequestJWTAuth_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/RequestJWTAuth", in, out, opts...) if err != nil { return nil, err } @@ -503,9 +451,8 @@ func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWT } func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTTokenRequest, opts ...grpc.CallOption) (*WaitJWTTokenResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitJWTTokenResponse) - err := c.cc.Invoke(ctx, DaemonService_WaitJWTToken_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitJWTToken", in, out, opts...) if err != nil { return nil, err } @@ -513,9 +460,8 @@ func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTToken } func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StartCPUProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_StartCPUProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartCPUProfile", in, out, opts...) 
if err != nil { return nil, err } @@ -523,9 +469,8 @@ func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUP } func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StopCPUProfileResponse) - err := c.cc.Invoke(ctx, DaemonService_StopCPUProfile_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopCPUProfile", in, out, opts...) if err != nil { return nil, err } @@ -533,22 +478,20 @@ func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUPro } func (c *daemonServiceClient) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InstallerResultResponse) - err := c.cc.Invoke(ctx, DaemonService_GetInstallerResult_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetInstallerResult", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[2], DaemonService_ExposeService_FullMethodName, cOpts...) +func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[2], "/daemon.DaemonService/ExposeService", opts...) 
if err != nil { return nil, err } - x := &grpc.GenericClientStream[ExposeServiceRequest, ExposeServiceEvent]{ClientStream: stream} + x := &daemonServiceExposeServiceClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -558,12 +501,26 @@ func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServi return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type DaemonService_ExposeServiceClient = grpc.ServerStreamingClient[ExposeServiceEvent] +type DaemonService_ExposeServiceClient interface { + Recv() (*ExposeServiceEvent, error) + grpc.ClientStream +} + +type daemonServiceExposeServiceClient struct { + grpc.ClientStream +} + +func (x *daemonServiceExposeServiceClient) Recv() (*ExposeServiceEvent, error) { + m := new(ExposeServiceEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer -// for forward compatibility. +// for forward compatibility type DaemonServiceServer interface { // Login uses setup key to prepare configuration for the daemon. Login(context.Context, *LoginRequest) (*LoginResponse, error) @@ -602,13 +559,13 @@ type DaemonServiceServer interface { TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) // StartCapture begins streaming packet capture on the WireGuard interface. // Requires --enable-capture set at service install/reconfigure time. - StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error + StartCapture(*StartCaptureRequest, DaemonService_StartCaptureServer) error // StartBundleCapture begins capturing packets to a server-side temp file // for inclusion in the next debug bundle. Auto-stops after the given timeout. 
StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) // StopBundleCapture stops the running bundle capture. Idempotent. StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) - SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error + SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) @@ -634,136 +591,132 @@ type DaemonServiceServer interface { StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error + ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error mustEmbedUnimplementedDaemonServiceServer() } -// UnimplementedDaemonServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedDaemonServiceServer struct{} +// UnimplementedDaemonServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedDaemonServiceServer struct { +} func (UnimplementedDaemonServiceServer) Login(context.Context, *LoginRequest) (*LoginResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Login not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Login not implemented") } func (UnimplementedDaemonServiceServer) WaitSSOLogin(context.Context, *WaitSSOLoginRequest) (*WaitSSOLoginResponse, error) { - return nil, status.Error(codes.Unimplemented, "method WaitSSOLogin not implemented") + return nil, status.Errorf(codes.Unimplemented, "method WaitSSOLogin not implemented") } func (UnimplementedDaemonServiceServer) Up(context.Context, *UpRequest) (*UpResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Up not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Up not implemented") } func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Status not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") } func (UnimplementedDaemonServiceServer) Down(context.Context, *DownRequest) (*DownResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Down not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Down not implemented") } func (UnimplementedDaemonServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetConfig not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetConfig not implemented") } func (UnimplementedDaemonServiceServer) ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListNetworks not implemented") } func 
(UnimplementedDaemonServiceServer) SelectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SelectNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SelectNetworks not implemented") } func (UnimplementedDaemonServiceServer) DeselectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeselectNetworks not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DeselectNetworks not implemented") } func (UnimplementedDaemonServiceServer) ForwardingRules(context.Context, *EmptyRequest) (*ForwardingRulesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ForwardingRules not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ForwardingRules not implemented") } func (UnimplementedDaemonServiceServer) DebugBundle(context.Context, *DebugBundleRequest) (*DebugBundleResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DebugBundle not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DebugBundle not implemented") } func (UnimplementedDaemonServiceServer) GetLogLevel(context.Context, *GetLogLevelRequest) (*GetLogLevelResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetLogLevel not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) SetLogLevel(context.Context, *SetLogLevelRequest) (*SetLogLevelResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetLogLevel not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) ListStates(context.Context, *ListStatesRequest) (*ListStatesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method 
ListStates not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListStates not implemented") } func (UnimplementedDaemonServiceServer) CleanState(context.Context, *CleanStateRequest) (*CleanStateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method CleanState not implemented") + return nil, status.Errorf(codes.Unimplemented, "method CleanState not implemented") } func (UnimplementedDaemonServiceServer) DeleteState(context.Context, *DeleteStateRequest) (*DeleteStateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeleteState not implemented") + return nil, status.Errorf(codes.Unimplemented, "method DeleteState not implemented") } func (UnimplementedDaemonServiceServer) SetSyncResponsePersistence(context.Context, *SetSyncResponsePersistenceRequest) (*SetSyncResponsePersistenceResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") } func (UnimplementedDaemonServiceServer) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) { - return nil, status.Error(codes.Unimplemented, "method TracePacket not implemented") + return nil, status.Errorf(codes.Unimplemented, "method TracePacket not implemented") } -func (UnimplementedDaemonServiceServer) StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error { - return status.Error(codes.Unimplemented, "method StartCapture not implemented") +func (UnimplementedDaemonServiceServer) StartCapture(*StartCaptureRequest, DaemonService_StartCaptureServer) error { + return status.Errorf(codes.Unimplemented, "method StartCapture not implemented") } func (UnimplementedDaemonServiceServer) StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) { - return nil, status.Error(codes.Unimplemented, "method 
StartBundleCapture not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StartBundleCapture not implemented") } func (UnimplementedDaemonServiceServer) StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StopBundleCapture not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StopBundleCapture not implemented") } -func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error { - return status.Error(codes.Unimplemented, "method SubscribeEvents not implemented") +func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeEvents not implemented") } func (UnimplementedDaemonServiceServer) GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetEvents not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetEvents not implemented") } func (UnimplementedDaemonServiceServer) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SwitchProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SwitchProfile not implemented") } func (UnimplementedDaemonServiceServer) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetConfig not implemented") + return nil, status.Errorf(codes.Unimplemented, "method SetConfig not implemented") } func (UnimplementedDaemonServiceServer) AddProfile(context.Context, *AddProfileRequest) (*AddProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method AddProfile not implemented") + return nil, 
status.Errorf(codes.Unimplemented, "method AddProfile not implemented") } func (UnimplementedDaemonServiceServer) RemoveProfile(context.Context, *RemoveProfileRequest) (*RemoveProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method RemoveProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method RemoveProfile not implemented") } func (UnimplementedDaemonServiceServer) ListProfiles(context.Context, *ListProfilesRequest) (*ListProfilesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListProfiles not implemented") + return nil, status.Errorf(codes.Unimplemented, "method ListProfiles not implemented") } func (UnimplementedDaemonServiceServer) GetActiveProfile(context.Context, *GetActiveProfileRequest) (*GetActiveProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetActiveProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetActiveProfile not implemented") } func (UnimplementedDaemonServiceServer) Logout(context.Context, *LogoutRequest) (*LogoutResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Logout not implemented") + return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") } func (UnimplementedDaemonServiceServer) GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetFeatures not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetFeatures not implemented") } func (UnimplementedDaemonServiceServer) TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error) { - return nil, status.Error(codes.Unimplemented, "method TriggerUpdate not implemented") + return nil, status.Errorf(codes.Unimplemented, "method TriggerUpdate not implemented") } func (UnimplementedDaemonServiceServer) GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) 
(*GetPeerSSHHostKeyResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") } func (UnimplementedDaemonServiceServer) RequestJWTAuth(context.Context, *RequestJWTAuthRequest) (*RequestJWTAuthResponse, error) { - return nil, status.Error(codes.Unimplemented, "method RequestJWTAuth not implemented") + return nil, status.Errorf(codes.Unimplemented, "method RequestJWTAuth not implemented") } func (UnimplementedDaemonServiceServer) WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) { - return nil, status.Error(codes.Unimplemented, "method WaitJWTToken not implemented") + return nil, status.Errorf(codes.Unimplemented, "method WaitJWTToken not implemented") } func (UnimplementedDaemonServiceServer) StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StartCPUProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StartCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method StopCPUProfile not implemented") + return nil, status.Errorf(codes.Unimplemented, "method StopCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetInstallerResult not implemented") + return nil, status.Errorf(codes.Unimplemented, "method GetInstallerResult not implemented") } -func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error { - return status.Error(codes.Unimplemented, "method ExposeService not 
implemented") +func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error { + return status.Errorf(codes.Unimplemented, "method ExposeService not implemented") } func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} -func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} // UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to DaemonServiceServer will @@ -773,13 +726,6 @@ type UnsafeDaemonServiceServer interface { } func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { - // If the following call panics, it indicates UnimplementedDaemonServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. 
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&DaemonService_ServiceDesc, srv) } @@ -793,7 +739,7 @@ func _DaemonService_Login_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Login_FullMethodName, + FullMethod: "/daemon.DaemonService/Login", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Login(ctx, req.(*LoginRequest)) @@ -811,7 +757,7 @@ func _DaemonService_WaitSSOLogin_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_WaitSSOLogin_FullMethodName, + FullMethod: "/daemon.DaemonService/WaitSSOLogin", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).WaitSSOLogin(ctx, req.(*WaitSSOLoginRequest)) @@ -829,7 +775,7 @@ func _DaemonService_Up_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Up_FullMethodName, + FullMethod: "/daemon.DaemonService/Up", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Up(ctx, req.(*UpRequest)) @@ -847,7 +793,7 @@ func _DaemonService_Status_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Status_FullMethodName, + FullMethod: "/daemon.DaemonService/Status", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Status(ctx, req.(*StatusRequest)) @@ -865,7 +811,7 @@ func _DaemonService_Down_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Down_FullMethodName, + FullMethod: "/daemon.DaemonService/Down", } handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Down(ctx, req.(*DownRequest)) @@ -883,7 +829,7 @@ func _DaemonService_GetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetConfig_FullMethodName, + FullMethod: "/daemon.DaemonService/GetConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) @@ -901,7 +847,7 @@ func _DaemonService_ListNetworks_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/ListNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListNetworks(ctx, req.(*ListNetworksRequest)) @@ -919,7 +865,7 @@ func _DaemonService_SelectNetworks_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SelectNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/SelectNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SelectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -937,7 +883,7 @@ func _DaemonService_DeselectNetworks_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DeselectNetworks_FullMethodName, + FullMethod: "/daemon.DaemonService/DeselectNetworks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeselectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -955,7 +901,7 @@ func _DaemonService_ForwardingRules_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ForwardingRules_FullMethodName, + FullMethod: 
"/daemon.DaemonService/ForwardingRules", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ForwardingRules(ctx, req.(*EmptyRequest)) @@ -973,7 +919,7 @@ func _DaemonService_DebugBundle_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DebugBundle_FullMethodName, + FullMethod: "/daemon.DaemonService/DebugBundle", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DebugBundle(ctx, req.(*DebugBundleRequest)) @@ -991,7 +937,7 @@ func _DaemonService_GetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetLogLevel_FullMethodName, + FullMethod: "/daemon.DaemonService/GetLogLevel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetLogLevel(ctx, req.(*GetLogLevelRequest)) @@ -1009,7 +955,7 @@ func _DaemonService_SetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetLogLevel_FullMethodName, + FullMethod: "/daemon.DaemonService/SetLogLevel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetLogLevel(ctx, req.(*SetLogLevelRequest)) @@ -1027,7 +973,7 @@ func _DaemonService_ListStates_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListStates_FullMethodName, + FullMethod: "/daemon.DaemonService/ListStates", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListStates(ctx, req.(*ListStatesRequest)) @@ -1045,7 +991,7 @@ func _DaemonService_CleanState_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: 
DaemonService_CleanState_FullMethodName, + FullMethod: "/daemon.DaemonService/CleanState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).CleanState(ctx, req.(*CleanStateRequest)) @@ -1063,7 +1009,7 @@ func _DaemonService_DeleteState_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_DeleteState_FullMethodName, + FullMethod: "/daemon.DaemonService/DeleteState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeleteState(ctx, req.(*DeleteStateRequest)) @@ -1081,7 +1027,7 @@ func _DaemonService_SetSyncResponsePersistence_Handler(srv interface{}, ctx cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetSyncResponsePersistence_FullMethodName, + FullMethod: "/daemon.DaemonService/SetSyncResponsePersistence", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetSyncResponsePersistence(ctx, req.(*SetSyncResponsePersistenceRequest)) @@ -1099,7 +1045,7 @@ func _DaemonService_TracePacket_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_TracePacket_FullMethodName, + FullMethod: "/daemon.DaemonService/TracePacket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TracePacket(ctx, req.(*TracePacketRequest)) @@ -1112,11 +1058,21 @@ func _DaemonService_StartCapture_Handler(srv interface{}, stream grpc.ServerStre if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).StartCapture(m, &grpc.GenericServerStream[StartCaptureRequest, CapturePacket]{ServerStream: stream}) + return srv.(DaemonServiceServer).StartCapture(m, &daemonServiceStartCaptureServer{stream}) } -// This type alias is provided for backwards compatibility with 
existing code that references the prior non-generic stream type by name. -type DaemonService_StartCaptureServer = grpc.ServerStreamingServer[CapturePacket] +type DaemonService_StartCaptureServer interface { + Send(*CapturePacket) error + grpc.ServerStream +} + +type daemonServiceStartCaptureServer struct { + grpc.ServerStream +} + +func (x *daemonServiceStartCaptureServer) Send(m *CapturePacket) error { + return x.ServerStream.SendMsg(m) +} func _DaemonService_StartBundleCapture_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartBundleCaptureRequest) @@ -1128,7 +1084,7 @@ func _DaemonService_StartBundleCapture_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StartBundleCapture_FullMethodName, + FullMethod: "/daemon.DaemonService/StartBundleCapture", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StartBundleCapture(ctx, req.(*StartBundleCaptureRequest)) @@ -1146,7 +1102,7 @@ func _DaemonService_StopBundleCapture_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StopBundleCapture_FullMethodName, + FullMethod: "/daemon.DaemonService/StopBundleCapture", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StopBundleCapture(ctx, req.(*StopBundleCaptureRequest)) @@ -1159,11 +1115,21 @@ func _DaemonService_SubscribeEvents_Handler(srv interface{}, stream grpc.ServerS if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).SubscribeEvents(m, &grpc.GenericServerStream[SubscribeRequest, SystemEvent]{ServerStream: stream}) + return srv.(DaemonServiceServer).SubscribeEvents(m, &daemonServiceSubscribeEventsServer{stream}) } -// This type alias is provided for backwards compatibility with 
existing code that references the prior non-generic stream type by name. -type DaemonService_SubscribeEventsServer = grpc.ServerStreamingServer[SystemEvent] +type DaemonService_SubscribeEventsServer interface { + Send(*SystemEvent) error + grpc.ServerStream +} + +type daemonServiceSubscribeEventsServer struct { + grpc.ServerStream +} + +func (x *daemonServiceSubscribeEventsServer) Send(m *SystemEvent) error { + return x.ServerStream.SendMsg(m) +} func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetEventsRequest) @@ -1175,7 +1141,7 @@ func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetEvents_FullMethodName, + FullMethod: "/daemon.DaemonService/GetEvents", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetEvents(ctx, req.(*GetEventsRequest)) @@ -1193,7 +1159,7 @@ func _DaemonService_SwitchProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SwitchProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/SwitchProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SwitchProfile(ctx, req.(*SwitchProfileRequest)) @@ -1211,7 +1177,7 @@ func _DaemonService_SetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_SetConfig_FullMethodName, + FullMethod: "/daemon.DaemonService/SetConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetConfig(ctx, req.(*SetConfigRequest)) @@ -1229,7 +1195,7 @@ func _DaemonService_AddProfile_Handler(srv interface{}, ctx context.Context, dec } info := 
&grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_AddProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/AddProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).AddProfile(ctx, req.(*AddProfileRequest)) @@ -1247,7 +1213,7 @@ func _DaemonService_RemoveProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_RemoveProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/RemoveProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RemoveProfile(ctx, req.(*RemoveProfileRequest)) @@ -1265,7 +1231,7 @@ func _DaemonService_ListProfiles_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_ListProfiles_FullMethodName, + FullMethod: "/daemon.DaemonService/ListProfiles", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListProfiles(ctx, req.(*ListProfilesRequest)) @@ -1283,7 +1249,7 @@ func _DaemonService_GetActiveProfile_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetActiveProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/GetActiveProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetActiveProfile(ctx, req.(*GetActiveProfileRequest)) @@ -1301,7 +1267,7 @@ func _DaemonService_Logout_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_Logout_FullMethodName, + FullMethod: "/daemon.DaemonService/Logout", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Logout(ctx, req.(*LogoutRequest)) @@ -1319,7 +1285,7 @@ func 
_DaemonService_GetFeatures_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetFeatures_FullMethodName, + FullMethod: "/daemon.DaemonService/GetFeatures", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetFeatures(ctx, req.(*GetFeaturesRequest)) @@ -1337,7 +1303,7 @@ func _DaemonService_TriggerUpdate_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_TriggerUpdate_FullMethodName, + FullMethod: "/daemon.DaemonService/TriggerUpdate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TriggerUpdate(ctx, req.(*TriggerUpdateRequest)) @@ -1355,7 +1321,7 @@ func _DaemonService_GetPeerSSHHostKey_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetPeerSSHHostKey_FullMethodName, + FullMethod: "/daemon.DaemonService/GetPeerSSHHostKey", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetPeerSSHHostKey(ctx, req.(*GetPeerSSHHostKeyRequest)) @@ -1373,7 +1339,7 @@ func _DaemonService_RequestJWTAuth_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_RequestJWTAuth_FullMethodName, + FullMethod: "/daemon.DaemonService/RequestJWTAuth", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RequestJWTAuth(ctx, req.(*RequestJWTAuthRequest)) @@ -1391,7 +1357,7 @@ func _DaemonService_WaitJWTToken_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_WaitJWTToken_FullMethodName, + FullMethod: "/daemon.DaemonService/WaitJWTToken", } handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ return srv.(DaemonServiceServer).WaitJWTToken(ctx, req.(*WaitJWTTokenRequest)) @@ -1409,7 +1375,7 @@ func _DaemonService_StartCPUProfile_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StartCPUProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/StartCPUProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StartCPUProfile(ctx, req.(*StartCPUProfileRequest)) @@ -1427,7 +1393,7 @@ func _DaemonService_StopCPUProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_StopCPUProfile_FullMethodName, + FullMethod: "/daemon.DaemonService/StopCPUProfile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StopCPUProfile(ctx, req.(*StopCPUProfileRequest)) @@ -1445,7 +1411,7 @@ func _DaemonService_GetInstallerResult_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: DaemonService_GetInstallerResult_FullMethodName, + FullMethod: "/daemon.DaemonService/GetInstallerResult", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetInstallerResult(ctx, req.(*InstallerResultRequest)) @@ -1458,11 +1424,21 @@ func _DaemonService_ExposeService_Handler(srv interface{}, stream grpc.ServerStr if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).ExposeService(m, &grpc.GenericServerStream[ExposeServiceRequest, ExposeServiceEvent]{ServerStream: stream}) + return srv.(DaemonServiceServer).ExposeService(m, &daemonServiceExposeServiceServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type DaemonService_ExposeServiceServer = grpc.ServerStreamingServer[ExposeServiceEvent] +type DaemonService_ExposeServiceServer interface { + Send(*ExposeServiceEvent) error + grpc.ServerStream +} + +type daemonServiceExposeServiceServer struct { + grpc.ServerStream +} + +func (x *daemonServiceExposeServiceServer) Send(m *ExposeServiceEvent) error { + return x.ServerStream.SendMsg(m) +} // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. // It's only intended for direct use with grpc.RegisterService, diff --git a/client/server/server.go b/client/server/server.go index 648ffa8ce6a..2e985a7a66c 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1515,31 +1515,61 @@ func (s *Server) GetConfig(ctx context.Context, req *proto.GetConfigRequest) (*p sshJWTCacheTTL = int32(*cfg.SSHJWTCacheTTL) } + // Surface what the management server most recently pushed via + // PeerConfig so the UI can show "Follow server (currently: <mode>)" + // and use the numeric defaults as placeholders in the override + // fields. All zero/empty when the engine has not received PeerConfig + // yet -- the UI handles that gracefully. 
+ var ( + spMode string + spRelayTOSecs uint32 + spP2pTOSecs uint32 + spP2pRetMax uint32 + ) + if s.connectClient != nil { + if eng := s.connectClient.Engine(); eng != nil { + if cm := eng.ConnMgr(); cm != nil { + spMode = cm.ServerPushedMode().String() + spRelayTOSecs = cm.ServerPushedRelayTimeoutSecs() + spP2pTOSecs = cm.ServerPushedP2pTimeoutSecs() + spP2pRetMax = cm.ServerPushedP2pRetryMaxSecs() + } + } + } + return &proto.GetConfigResponse{ - ManagementUrl: managementURL.String(), - PreSharedKey: preSharedKey, - AdminURL: adminURL.String(), - InterfaceName: cfg.WgIface, - WireguardPort: int64(cfg.WgPort), - Mtu: int64(cfg.MTU), - DisableAutoConnect: cfg.DisableAutoConnect, - ServerSSHAllowed: *cfg.ServerSSHAllowed, - RosenpassEnabled: cfg.RosenpassEnabled, - RosenpassPermissive: cfg.RosenpassPermissive, - LazyConnectionEnabled: cfg.LazyConnectionEnabled, - BlockInbound: cfg.BlockInbound, - DisableNotifications: disableNotifications, - NetworkMonitor: networkMonitor, - DisableDns: disableDNS, - DisableClientRoutes: disableClientRoutes, - DisableServerRoutes: disableServerRoutes, - BlockLanAccess: blockLANAccess, - EnableSSHRoot: enableSSHRoot, - EnableSSHSFTP: enableSSHSFTP, - EnableSSHLocalPortForwarding: enableSSHLocalPortForwarding, - EnableSSHRemotePortForwarding: enableSSHRemotePortForwarding, - DisableSSHAuth: disableSSHAuth, - SshJWTCacheTTL: sshJWTCacheTTL, + ManagementUrl: managementURL.String(), + PreSharedKey: preSharedKey, + AdminURL: adminURL.String(), + InterfaceName: cfg.WgIface, + WireguardPort: int64(cfg.WgPort), + Mtu: int64(cfg.MTU), + DisableAutoConnect: cfg.DisableAutoConnect, + ServerSSHAllowed: *cfg.ServerSSHAllowed, + RosenpassEnabled: cfg.RosenpassEnabled, + RosenpassPermissive: cfg.RosenpassPermissive, + LazyConnectionEnabled: cfg.LazyConnectionEnabled, + BlockInbound: cfg.BlockInbound, + DisableNotifications: disableNotifications, + NetworkMonitor: networkMonitor, + DisableDns: disableDNS, + DisableClientRoutes: disableClientRoutes, + 
DisableServerRoutes: disableServerRoutes, + BlockLanAccess: blockLANAccess, + EnableSSHRoot: enableSSHRoot, + EnableSSHSFTP: enableSSHSFTP, + EnableSSHLocalPortForwarding: enableSSHLocalPortForwarding, + EnableSSHRemotePortForwarding: enableSSHRemotePortForwarding, + DisableSSHAuth: disableSSHAuth, + SshJWTCacheTTL: sshJWTCacheTTL, + ConnectionMode: cfg.ConnectionMode, + P2PTimeoutSeconds: cfg.P2pTimeoutSeconds, + RelayTimeoutSeconds: cfg.RelayTimeoutSeconds, + P2PRetryMaxSeconds: cfg.P2pRetryMaxSeconds, + ServerPushedConnectionMode: spMode, + ServerPushedRelayTimeoutSeconds: spRelayTOSecs, + ServerPushedP2PTimeoutSeconds: spP2pTOSecs, + ServerPushedP2PRetryMaxSeconds: spP2pRetMax, }, nil } diff --git a/client/server/server_test.go b/client/server/server_test.go index 641cd85fefe..a91e1dd6014 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/job" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/groups" @@ -335,7 +336,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil, peer_connections.NewMemoryStore(5*time.Minute), peer_connections.NewSnapshotRouter()) if err != nil { return nil, "", err } diff --git a/client/server/setconfig_test.go 
b/client/server/setconfig_test.go index b90b5653dc4..9d8ce003e5b 100644 --- a/client/server/setconfig_test.go +++ b/client/server/setconfig_test.go @@ -201,6 +201,17 @@ func verifyAllFieldsCovered(t *testing.T, req *proto.SetConfigRequest) { "EnableSSHRemotePortForwarding": true, "DisableSSHAuth": true, "SshJWTCacheTTL": true, + // Phase 3.7i Connection-Mode fields. Currently in the proto so + // daemons can advertise them via GetConfig, but SetConfig does + // NOT apply them at runtime — they're only persisted via + // `netbird service install/reconfigure --connection-mode/...` + // (writes the active profile file directly; daemon picks up on + // next start). Wiring them through SetConfig is a follow-up + // task. Listed here so the structural test passes. + "ConnectionMode": true, + "P2PTimeoutSeconds": true, + "RelayTimeoutSeconds": true, + "P2PRetryMaxSeconds": true, } val := reflect.ValueOf(req).Elem() @@ -265,6 +276,17 @@ func TestCLIFlags_MappedToSetConfig(t *testing.T) { // SetConfigRequest fields that don't have CLI flags (settable only via UI or other means). fieldsWithoutCLIFlags := map[string]bool{ "DisableNotifications": true, // Only settable via UI + // Phase 3.7i Connection-Mode fields: have CLI flags + // (--connection-mode, --relay-timeout, --p2p-timeout, + // --p2p-retry-max) but those flags belong to the + // `netbird service install/reconfigure` command, not `up`, + // and they bypass the SetConfig RPC entirely (write directly + // to the active profile file). So from this test's + // perspective they have no SetConfig-mapped CLI flag. + "ConnectionMode": true, + "P2PTimeoutSeconds": true, + "RelayTimeoutSeconds": true, + "P2PRetryMaxSeconds": true, } // Get all SetConfigRequest fields to verify our map is complete. 
diff --git a/client/status/status.go b/client/status/status.go index 8c932bbab29..60da8f303f9 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -73,6 +73,10 @@ type PeerStateDetailOutput struct { Latency time.Duration `json:"latency" yaml:"latency"` RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"` Networks []string `json:"networks" yaml:"networks"` + // Phase 3 (#5989): ICE-backoff state for p2p-dynamic mode. + IceBackoffFailures int `json:"iceBackoffFailures" yaml:"iceBackoffFailures"` + IceBackoffNextRetry time.Time `json:"iceBackoffNextRetry" yaml:"iceBackoffNextRetry"` + IceBackoffSuspended bool `json:"iceBackoffSuspended" yaml:"iceBackoffSuspended"` } type PeersStateOutput struct { @@ -337,6 +341,9 @@ func mapPeers( Latency: pbPeerState.GetLatency().AsDuration(), RosenpassEnabled: pbPeerState.GetRosenpassEnabled(), Networks: pbPeerState.GetNetworks(), + IceBackoffFailures: int(pbPeerState.GetIceBackoffFailures()), + IceBackoffNextRetry: iceBackoffNextRetry(pbPeerState), + IceBackoffSuspended: pbPeerState.GetIceBackoffSuspended(), } peersStateDetail = append(peersStateDetail, peerState) @@ -645,6 +652,9 @@ func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { Networks: maps.Keys(peerState.GetRoutes()), Latency: durationpb.New(peerState.Latency), SshHostKey: peerState.SSHHostKey, + IceBackoffFailures: int32(peerState.IceBackoffFailures), + IceBackoffNextRetry: timestamppb.New(peerState.IceBackoffNextRetry), + IceBackoffSuspended: peerState.IceBackoffSuspended, } pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) } @@ -683,6 +693,17 @@ func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { return &pbFullStatus } +// iceBackoffNextRetry returns the ICE backoff next-retry time from a proto +// PeerState. If the timestamp field is unset (nil), it returns Go's zero +// time to match the daemon's zero-valued State.IceBackoffNextRetry. 
+func iceBackoffNextRetry(pbPeerState *proto.PeerState) time.Time { + ts := pbPeerState.GetIceBackoffNextRetry() + if ts == nil { + return time.Time{} + } + return ts.AsTime().Local() +} + func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bool) string { var ( peersString = "" @@ -768,6 +789,21 @@ func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bo peerState.Latency.String(), ) + // Phase 3 (#5989): append ICE-backoff line only when suspended AND + // the suspension has not yet expired by wall-clock. The PeerState + // snapshot is only refreshed on ICE state-change events, so the + // suspended-flag stays true even after nextRetry has passed; the + // time-check here suppresses the noise for already-expired windows. + if peerState.IceBackoffSuspended && time.Now().Before(peerState.IceBackoffNextRetry) { + remaining := time.Until(peerState.IceBackoffNextRetry).Round(time.Second) + peerString += fmt.Sprintf( + " ICE backoff: suspended for %s (failure #%d, retry at %s)\n", + remaining, + peerState.IceBackoffFailures, + peerState.IceBackoffNextRetry.Format("15:04:05"), + ) + } + peersString += peerString } return peersString diff --git a/client/status/status_test.go b/client/status/status_test.go index 7754eebae97..5c99461b551 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -304,7 +304,10 @@ func TestParsingToJSON(t *testing.T) { "quantumResistance": false, "networks": [ "10.1.0.0/24" - ] + ], + "iceBackoffFailures": 0, + "iceBackoffNextRetry": "0001-01-01T00:00:00Z", + "iceBackoffSuspended": false }, { "fqdn": "peer-2.awesome-domain.com", @@ -327,7 +330,10 @@ func TestParsingToJSON(t *testing.T) { "transferSent": 1000, "latency": 10000000, "quantumResistance": false, - "networks": null + "networks": null, + "iceBackoffFailures": 0, + "iceBackoffNextRetry": "0001-01-01T00:00:00Z", + "iceBackoffSuspended": false } ] }, @@ -436,6 +442,9 @@ func TestParsingToYAML(t *testing.T) { 
quantumResistance: false networks: - 10.1.0.0/24 + iceBackoffFailures: 0 + iceBackoffNextRetry: 0001-01-01T00:00:00Z + iceBackoffSuspended: false - fqdn: peer-2.awesome-domain.com netbirdIp: 192.168.178.102 publicKey: Pubkey2 @@ -455,6 +464,9 @@ func TestParsingToYAML(t *testing.T) { latency: 10ms quantumResistance: false networks: [] + iceBackoffFailures: 0 + iceBackoffNextRetry: 0001-01-01T00:00:00Z + iceBackoffSuspended: false cliVersion: development daemonVersion: 0.14.1 daemonStatus: Connected diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 28f98ae59ae..53b524162ec 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -251,7 +251,6 @@ type serviceClient struct { mAllowSSH *systray.MenuItem mAutoConnect *systray.MenuItem mEnableRosenpass *systray.MenuItem - mLazyConnEnabled *systray.MenuItem mBlockInbound *systray.MenuItem mNotifications *systray.MenuItem mAdvancedSettings *systray.MenuItem @@ -287,6 +286,27 @@ type serviceClient struct { sDisableSSHAuth *widget.Check iSSHJWTCacheTTL *widget.Entry + + // Phase 1+ ConnectionMode selector + per-mode timeout overrides. + // Defaulting to "Follow server" leaves the local override empty so + // the daemon uses whatever the management server pushes. + sConnectionMode *widget.Select + iRelayTimeout *widget.Entry + iP2pTimeout *widget.Entry + iP2pRetryMax *widget.Entry + connectionMode string + relayTimeoutSecs uint32 + p2pTimeoutSecs uint32 + p2pRetryMaxSecs uint32 + + // Phase 3.7h: latest values pushed by the management server, captured + // from GetConfigResponse.ServerPushed*. Used to render the + // "Follow server (currently: <mode>)" entry in the dropdown and the + // "use server default (<N>s)" hints in the timeout entries. + serverPushedMode string + serverPushedRelayTimeoutSecs uint32 + serverPushedP2pTimeoutSecs uint32 + serverPushedP2pRetryMaxSecs uint32 + + // observable settings over corresponding iMngURL and iPreSharedKey values. 
managementURL string preSharedKey string @@ -476,6 +496,19 @@ func (s *serviceClient) showSettingsUI() { s.sDisableSSHAuth = widget.NewCheck("Disable SSH Authentication", nil) s.iSSHJWTCacheTTL = widget.NewEntry() + // Connection-mode override + per-mode timeout fields. + // Order matches the Android spinner so behaviour is consistent. + s.sConnectionMode = widget.NewSelect( + []string{"Follow server", "relay-forced", "p2p", "p2p-lazy", "p2p-dynamic"}, + func(string) { s.updateTimeoutEntriesEnabled() }, + ) + s.iRelayTimeout = widget.NewEntry() + s.iRelayTimeout.SetPlaceHolder("seconds (empty = use server default)") + s.iP2pTimeout = widget.NewEntry() + s.iP2pTimeout.SetPlaceHolder("seconds (empty = use server default)") + s.iP2pRetryMax = widget.NewEntry() + s.iP2pRetryMax.SetPlaceHolder("seconds (empty = use server default)") + s.wSettings.SetContent(s.getSettingsForm()) s.wSettings.Resize(fyne.NewSize(600, 400)) s.wSettings.SetFixedSize(true) @@ -586,9 +619,52 @@ func (s *serviceClient) hasSettingsChanged(iMngURL string, port, mtu int64) bool s.disableClientRoutes != s.sDisableClientRoutes.Checked || s.disableServerRoutes != s.sDisableServerRoutes.Checked || s.blockLANAccess != s.sBlockLANAccess.Checked || + s.hasConnectionModeChanges() || s.hasSSHChanges() } +// hasConnectionModeChanges reports whether the user touched the +// Connection Mode dropdown or any of the timeout entries on the +// Network tab. Empty / non-numeric timeout entries map to 0 +// (= no override). 
+func (s *serviceClient) hasConnectionModeChanges() bool { + if s.sConnectionMode == nil { + return false + } + desired := s.selectedConnectionMode() + if s.connectionMode != desired { + return true + } + return s.relayTimeoutSecs != parseUint32Field(s.iRelayTimeout.Text) || + s.p2pTimeoutSecs != parseUint32Field(s.iP2pTimeout.Text) || + s.p2pRetryMaxSecs != parseUint32Field(s.iP2pRetryMax.Text) +} + +// selectedConnectionMode returns the canonical mode string for the +// current dropdown selection. The "Follow server" entry maps to empty +// (clears any local override). It may carry a "(currently: <mode>)" +// suffix when the engine has received a PeerConfig, so we match by +// prefix. +func (s *serviceClient) selectedConnectionMode() string { + v := s.sConnectionMode.Selected + if v == "" || strings.HasPrefix(v, "Follow server") { + return "" + } + return v +} + +func parseUint32Field(text string) uint32 { + t := strings.TrimSpace(text) + if t == "" { + return 0 + } + v, err := strconv.ParseUint(t, 10, 32) + if err != nil { + return 0 + } + return uint32(v) +} + func (s *serviceClient) applySettingsChanges(iMngURL string, port, mtu int64) error { s.managementURL = iMngURL s.preSharedKey = s.iPreSharedKey.Text @@ -662,6 +738,17 @@ func (s *serviceClient) buildSetConfigRequest(iMngURL string, port, mtu int64) ( req.OptionalPreSharedKey = &s.iPreSharedKey.Text } + // Connection-mode override + per-mode timeouts. Empty connection_mode + // clears any local override (= "Follow server"). 
+ connMode := s.selectedConnectionMode() + req.ConnectionMode = &connMode + relaySecs := parseUint32Field(s.iRelayTimeout.Text) + p2pSecs := parseUint32Field(s.iP2pTimeout.Text) + retrySecs := parseUint32Field(s.iP2pRetryMax.Text) + req.RelayTimeoutSeconds = &relaySecs + req.P2PTimeoutSeconds = &p2pSecs + req.P2PRetryMaxSeconds = &retrySecs + + return req, nil } @@ -731,10 +818,94 @@ func (s *serviceClient) getNetworkForm() *widget.Form { {Text: "Disable Client Routes", Widget: s.sDisableClientRoutes}, {Text: "Disable Server Routes", Widget: s.sDisableServerRoutes}, {Text: "Disable LAN Access", Widget: s.sBlockLANAccess}, + {Text: "Connection Mode", Widget: s.sConnectionMode}, + {Text: "Relay Timeout (s)", Widget: s.iRelayTimeout}, + {Text: "P2P Timeout (s)", Widget: s.iP2pTimeout}, + {Text: "P2P Retry-Max (s)", Widget: s.iP2pRetryMax}, }, } } + +// followServerLabel returns the dropdown text for the "Follow server" +// option. When the engine has received a PeerConfig and the server has +// pushed a mode, we suffix it with "(currently: <mode>)" so users see +// what they would inherit by leaving the override on Follow server. +func (s *serviceClient) followServerLabel() string { + if s.serverPushedMode == "" { + return "Follow server" + } + return "Follow server (currently: " + s.serverPushedMode + ")" +} + +// formatTimeoutHint renders the placeholder text for an empty override +// entry, including the actual server-pushed default in seconds when +// available. +func formatTimeoutHint(secs uint32) string { + if secs == 0 { + return "seconds (empty = use server default)" + } + return "seconds (empty = use server default, " + strconv.FormatUint(uint64(secs), 10) + "s)" +} + +// refreshConnectionModeWidgets re-renders the Connection Mode dropdown +// and the timeout entries' placeholder text based on the latest +// server-pushed values. Safe to call multiple times. 
Preserves the +// current selection by canonical-mode string (so "(currently: ...)" +// suffix changes do not lose the user's choice). +func (s *serviceClient) refreshConnectionModeWidgets() { + if s.sConnectionMode == nil { + return + } + prev := s.selectedConnectionMode() + s.sConnectionMode.Options = []string{ + s.followServerLabel(), + "relay-forced", + "p2p", + "p2p-lazy", + "p2p-dynamic", + } + if prev == "" { + s.sConnectionMode.SetSelected(s.followServerLabel()) + } else { + s.sConnectionMode.SetSelected(prev) + } + s.sConnectionMode.Refresh() + + if s.iRelayTimeout != nil { + s.iRelayTimeout.SetPlaceHolder(formatTimeoutHint(s.serverPushedRelayTimeoutSecs)) + } + if s.iP2pTimeout != nil { + s.iP2pTimeout.SetPlaceHolder(formatTimeoutHint(s.serverPushedP2pTimeoutSecs)) + } + if s.iP2pRetryMax != nil { + s.iP2pRetryMax.SetPlaceHolder(formatTimeoutHint(s.serverPushedP2pRetryMaxSecs)) + } +} + +// updateTimeoutEntriesEnabled enables only the timeout fields that are +// meaningful for the currently-selected connection mode. The lazy +// connection manager (and therefore inactivity teardown) only runs in +// p2p-lazy + p2p-dynamic, so other modes get all three fields disabled. 
+func (s *serviceClient) updateTimeoutEntriesEnabled() { + if s.iRelayTimeout == nil { + return + } + switch s.sConnectionMode.Selected { + case "p2p-lazy": + s.iRelayTimeout.Enable() + s.iP2pTimeout.Disable() + s.iP2pRetryMax.Disable() + case "p2p-dynamic": + s.iRelayTimeout.Enable() + s.iP2pTimeout.Enable() + s.iP2pRetryMax.Enable() + default: + s.iRelayTimeout.Disable() + s.iP2pTimeout.Disable() + s.iP2pRetryMax.Disable() + } +} + func (s *serviceClient) getSSHForm() *widget.Form { return &widget.Form{ Items: []*widget.FormItem{ @@ -1042,7 +1213,6 @@ func (s *serviceClient) onTrayReady() { s.mAllowSSH = s.mSettings.AddSubMenuItemCheckbox("Allow SSH", allowSSHMenuDescr, false) s.mAutoConnect = s.mSettings.AddSubMenuItemCheckbox("Connect on Startup", autoConnectMenuDescr, false) s.mEnableRosenpass = s.mSettings.AddSubMenuItemCheckbox("Enable Quantum-Resistance", quantumResistanceMenuDescr, false) - s.mLazyConnEnabled = s.mSettings.AddSubMenuItemCheckbox("Enable Lazy Connections", lazyConnMenuDescr, false) s.mBlockInbound = s.mSettings.AddSubMenuItemCheckbox("Block Inbound Connections", blockInboundMenuDescr, false) s.mNotifications = s.mSettings.AddSubMenuItemCheckbox("Notifications", notificationsMenuDescr, false) s.mSettings.AddSeparator() @@ -1069,7 +1239,7 @@ func (s *serviceClient) onTrayReady() { s.mExitNode.Disable() s.exitNodeMu.Unlock() - s.mNetworks = systray.AddMenuItem("Networks", networksMenuDescr) + s.mNetworks = systray.AddMenuItem("Peers and Networks", networksMenuDescr) s.mNetworks.Disable() systray.AddSeparator() @@ -1314,6 +1484,14 @@ func (s *serviceClient) getSrvConfig() { cfg = protoConfigToConfig(srvCfg) + // Capture the raw server-pushed values so the UI can show + // "Follow server (currently: <mode>)" and the numeric default-hints + // in the override entries. 
+ s.serverPushedMode = srvCfg.GetServerPushedConnectionMode() + s.serverPushedRelayTimeoutSecs = srvCfg.GetServerPushedRelayTimeoutSeconds() + s.serverPushedP2pTimeoutSecs = srvCfg.GetServerPushedP2PTimeoutSeconds() + s.serverPushedP2pRetryMaxSecs = srvCfg.GetServerPushedP2PRetryMaxSeconds() + if cfg.ManagementURL.String() != "" { s.managementURL = cfg.ManagementURL.String() } @@ -1348,6 +1526,11 @@ func (s *serviceClient) getSrvConfig() { s.sshJWTCacheTTL = *cfg.SSHJWTCacheTTL } + s.connectionMode = cfg.ConnectionMode + s.relayTimeoutSecs = cfg.RelayTimeoutSeconds + s.p2pTimeoutSecs = cfg.P2pTimeoutSeconds + s.p2pRetryMaxSecs = cfg.P2pRetryMaxSeconds + if s.showAdvancedSettings { s.iMngURL.SetText(s.managementURL) s.iPreSharedKey.SetText(cfg.PreSharedKey) @@ -1386,6 +1569,33 @@ func (s *serviceClient) getSrvConfig() { if cfg.SSHJWTCacheTTL != nil { s.iSSHJWTCacheTTL.SetText(strconv.Itoa(*cfg.SSHJWTCacheTTL)) } + + // Connection-mode dropdown + timeout entries. Refresh first so + // the "Follow server (currently: ...)" suffix and the numeric + // default-hints reflect what GetConfigResponse just delivered. 
+ s.refreshConnectionModeWidgets() + switch cfg.ConnectionMode { + case "relay-forced", "p2p", "p2p-lazy", "p2p-dynamic": + s.sConnectionMode.SetSelected(cfg.ConnectionMode) + default: + s.sConnectionMode.SetSelected(s.followServerLabel()) + } + if cfg.RelayTimeoutSeconds == 0 { + s.iRelayTimeout.SetText("") + } else { + s.iRelayTimeout.SetText(strconv.FormatUint(uint64(cfg.RelayTimeoutSeconds), 10)) + } + if cfg.P2pTimeoutSeconds == 0 { + s.iP2pTimeout.SetText("") + } else { + s.iP2pTimeout.SetText(strconv.FormatUint(uint64(cfg.P2pTimeoutSeconds), 10)) + } + if cfg.P2pRetryMaxSeconds == 0 { + s.iP2pRetryMax.SetText("") + } else { + s.iP2pRetryMax.SetText(strconv.FormatUint(uint64(cfg.P2pRetryMaxSeconds), 10)) + } + s.updateTimeoutEntriesEnabled() } if s.mNotifications == nil { @@ -1465,6 +1675,12 @@ func protoConfigToConfig(cfg *proto.GetConfigResponse) *profilemanager.Config { ttl := int(cfg.SshJWTCacheTTL) config.SSHJWTCacheTTL = &ttl + // Phase 1+ ConnectionMode override + per-mode timeouts. 
+ config.ConnectionMode = cfg.ConnectionMode + config.RelayTimeoutSeconds = cfg.RelayTimeoutSeconds + config.P2pTimeoutSeconds = cfg.P2PTimeoutSeconds + config.P2pRetryMaxSeconds = cfg.P2PRetryMaxSeconds + return &config } @@ -1551,12 +1767,6 @@ func (s *serviceClient) loadSettings() { s.mEnableRosenpass.Uncheck() } - if cfg.LazyConnectionEnabled { - s.mLazyConnEnabled.Check() - } else { - s.mLazyConnEnabled.Uncheck() - } - if cfg.BlockInbound { s.mBlockInbound.Check() } else { @@ -1579,7 +1789,6 @@ func (s *serviceClient) updateConfig() error { disableAutoStart := !s.mAutoConnect.Checked() sshAllowed := s.mAllowSSH.Checked() rosenpassEnabled := s.mEnableRosenpass.Checked() - lazyConnectionEnabled := s.mLazyConnEnabled.Checked() blockInbound := s.mBlockInbound.Checked() notificationsDisabled := !s.mNotifications.Checked() @@ -1602,14 +1811,13 @@ func (s *serviceClient) updateConfig() error { } req := proto.SetConfigRequest{ - ProfileName: activeProf.Name, - Username: currUser.Username, - DisableAutoConnect: &disableAutoStart, - ServerSSHAllowed: &sshAllowed, - RosenpassEnabled: &rosenpassEnabled, - LazyConnectionEnabled: &lazyConnectionEnabled, - BlockInbound: &blockInbound, - DisableNotifications: ¬ificationsDisabled, + ProfileName: activeProf.Name, + Username: currUser.Username, + DisableAutoConnect: &disableAutoStart, + ServerSSHAllowed: &sshAllowed, + RosenpassEnabled: &rosenpassEnabled, + BlockInbound: &blockInbound, + DisableNotifications: ¬ificationsDisabled, } if _, err := conn.SetConfig(s.ctx, &req); err != nil { diff --git a/client/ui/const.go b/client/ui/const.go index 48619be752c..ce7a9a29421 100644 --- a/client/ui/const.go +++ b/client/ui/const.go @@ -4,7 +4,6 @@ const ( allowSSHMenuDescr = "Allow SSH connections" autoConnectMenuDescr = "Connect automatically when the service starts" quantumResistanceMenuDescr = "Enable post-quantum security via Rosenpass" - lazyConnMenuDescr = "[Experimental] Enable lazy connections" blockInboundMenuDescr = "Block 
inbound connections to the local machine and routed networks" notificationsMenuDescr = "Enable notifications" advancedSettingsMenuDescr = "Advanced settings of the application" diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 876fcef5fd8..90208230867 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -43,8 +43,6 @@ func (h *eventHandler) listen(ctx context.Context) { h.handleAutoConnectClick() case <-h.client.mEnableRosenpass.ClickedCh: h.handleRosenpassClick() - case <-h.client.mLazyConnEnabled.ClickedCh: - h.handleLazyConnectionClick() case <-h.client.mBlockInbound.ClickedCh: h.handleBlockInboundClick() case <-h.client.mAdvancedSettings.ClickedCh: @@ -152,15 +150,6 @@ func (h *eventHandler) handleRosenpassClick() { } } -func (h *eventHandler) handleLazyConnectionClick() { - h.toggleCheckbox(h.client.mLazyConnEnabled) - if err := h.updateConfigWithErr(); err != nil { - h.toggleCheckbox(h.client.mLazyConnEnabled) // revert checkbox state on error - log.Errorf("failed to update config: %v", err) - h.client.notifier.Send("Error", "Failed to update lazy connection settings") - } -} - func (h *eventHandler) handleBlockInboundClick() { h.toggleCheckbox(h.client.mBlockInbound) if err := h.updateConfigWithErr(); err != nil { diff --git a/client/ui/network.go b/client/ui/network.go index 571e871bbf2..e241c256446 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -25,6 +25,7 @@ const ( allNetworksText = "All networks" overlappingNetworksText = "Overlapping networks" exitNodeNetworksText = "Exit-node networks" + peersText = "Peers" allNetworks filter = "all" overlappingNetworks filter = "overlapping" exitNodeNetworks filter = "exit-node" @@ -34,7 +35,7 @@ const ( type filter string func (s *serviceClient) showNetworksUI() { - s.wNetworks = s.app.NewWindow("Networks") + s.wNetworks = s.app.NewWindow("Peers and Networks") s.wNetworks.SetOnClosed(s.cancel) allGrid := container.New(layout.NewGridLayout(3)) @@ -42,17 
+43,64 @@ func (s *serviceClient) showNetworksUI() { overlappingGrid := container.New(layout.NewGridLayout(3)) exitNodeGrid := container.New(layout.NewGridLayout(3)) routeCheckContainer := container.NewVBox() + peersBundle := s.buildPeersTabContent(s.ctx) + // Wrap the Peers tab content in a Stack so it fills the full tab + // area (NewBorder alone collapses when child MinSizes are small). tabs := container.NewAppTabs( + container.NewTabItem(peersText, container.NewStack(peersBundle.Content)), container.NewTabItem(allNetworksText, allGrid), container.NewTabItem(overlappingNetworksText, overlappingGrid), container.NewTabItem(exitNodeNetworksText, exitNodeGrid), ) - tabs.OnSelected = func(item *container.TabItem) { + + // Phase 3.7i (#5989): the outer footer adapts to the active tab so + // the user has a single place for actions. On the Peers tab we show + // only Show-Full + Refresh; on a Networks tab we show the legacy + // Refresh + Select-all + Deselect-All. + selectAllBtn := widget.NewButton("Select all", func() { + _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + s.selectAllFilteredNetworks(f) + s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + }) + deselectAllBtn := widget.NewButton("Deselect All", func() { + _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + s.deselectAllFilteredNetworks(f) s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + }) + refreshBtn := widget.NewButton("Refresh", func() { + if tabs.Selected() != nil && tabs.Selected().Text == peersText { + peersBundle.Refresh() + } else { + s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + } + }) + + updateFooter := func() { + onPeers := tabs.Selected() != nil && tabs.Selected().Text == peersText + if onPeers { + peersBundle.ShowFull.Show() + selectAllBtn.Hide() + deselectAllBtn.Hide() + } else { + peersBundle.ShowFull.Hide() + selectAllBtn.Show() + 
deselectAllBtn.Show() + } + } + + tabs.OnSelected = func(item *container.TabItem) { + updateFooter() + if item != nil && item.Text != peersText { + s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + } } tabs.OnUnselected = func(item *container.TabItem) { - grid, _ := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - grid.Objects = nil + // Only reset network grids when leaving a network tab; the + // peers VBox manages its own state. + if item != nil && item.Text != peersText { + grid, _ := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) + grid.Objects = nil + } } routeCheckContainer.Add(tabs) @@ -61,21 +109,13 @@ func (s *serviceClient) showNetworksUI() { buttonBox := container.NewHBox( layout.NewSpacer(), - widget.NewButton("Refresh", func() { - s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - }), - widget.NewButton("Select all", func() { - _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - s.selectAllFilteredNetworks(f) - s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - }), - widget.NewButton("Deselect All", func() { - _, f := getGridAndFilterFromTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - s.deselectAllFilteredNetworks(f) - s.updateNetworksBasedOnDisplayTab(tabs, allGrid, overlappingGrid, exitNodeGrid) - }), + peersBundle.ShowFull, + refreshBtn, + selectAllBtn, + deselectAllBtn, layout.NewSpacer(), ) + updateFooter() // initial state matches the first tab (Peers) content := container.NewBorder(nil, buttonBox, nil, nil, scrollContainer) diff --git a/client/ui/peers_tab.go b/client/ui/peers_tab.go new file mode 100644 index 00000000000..5a2a55190fc --- /dev/null +++ b/client/ui/peers_tab.go @@ -0,0 +1,305 @@ +//go:build !(linux && 386) + +package main + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "time" + + "fyne.io/fyne/v2" + "fyne.io/fyne/v2/container" + 
"fyne.io/fyne/v2/widget" + + "github.com/netbirdio/netbird/client/proto" +) + +// peersTabBundle is what buildPeersTabContent returns: the tab content +// that lives inside AppTabs PLUS the Show-Full checkbox + the refresh +// callback that the OUTER window footer needs (so the user has a single +// footer for both showFull-toggle and Refresh-trigger). Phase 3.7i. +type peersTabBundle struct { + Content fyne.CanvasObject + ShowFull *widget.Check + Refresh func() +} + +// buildPeersTabContent constructs the "Peers" tab content (counter + +// list of expandable peer rows). Show-Full + Refresh live in the outer +// window footer (returned via peersTabBundle so network.go can place +// them). Phase 3.7i of #5989. +func (s *serviceClient) buildPeersTabContent(ctx context.Context) peersTabBundle { + summary := widget.NewLabel("") + breakdown := widget.NewLabel("") + listVBox := container.NewVBox() + showFull := widget.NewCheck("Show full peer details", nil) + + // Per-peer expand state survives Refresh (otherwise every render + // would collapse all rows the user just opened). Keyed by pubkey. 
+ expandedMu := sync.Mutex{} + expanded := make(map[string]bool) + + render := func() { + conn, err := s.getSrvClient(failFastTimeout) + if err != nil { + fyne.Do(func() { summary.SetText("Error: " + err.Error()) }) + return + } + callCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + st, err := conn.Status(callCtx, &proto.StatusRequest{GetFullPeerStatus: true}) + if err != nil { + fyne.Do(func() { summary.SetText("Error: " + err.Error()) }) + return + } + fs := st.GetFullStatus() + + fyne.Do(func() { + summary.SetText(fmt.Sprintf("%d of %d peers online (server)", + fs.GetServerOnlinePeers(), fs.GetConfiguredPeersTotal())) + breakdown.SetText(fmt.Sprintf("%d P2P | %d relayed | %d idle | %d offline", + fs.GetP2PConnectedPeers(), fs.GetRelayedConnectedPeers(), + fs.GetIdleOnlinePeers(), fs.GetServerOfflinePeers())) + + listVBox.Objects = nil + peers := fs.GetPeers() + sort.SliceStable(peers, func(i, j int) bool { + gi, gj := peerGroup(peers[i]), peerGroup(peers[j]) + if gi != gj { + return gi < gj + } + return strings.ToLower(peers[i].GetFqdn()) < strings.ToLower(peers[j].GetFqdn()) + }) + for _, p := range peers { + listVBox.Add(newPeerRow(p, showFull.Checked, &expandedMu, expanded)) + } + listVBox.Refresh() + }) + } + + showFull.OnChanged = func(_ bool) { render() } + + // Lifecycle-safe periodic refresh: ctx-respecting, exits when the + // serviceClient context is cancelled (i.e. the UI process shuts down). + // 30 s polling -- daemon-RPC is local so cost is small. + go func() { + render() + t := time.NewTicker(30 * time.Second) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + render() + } + } + }() + + // Place listVBox directly in Border center (no inner VScroll). The + // outer Networks-window already wraps everything in a VScroll, so + // nesting another would create double-scroll UX. Border center + // auto-grows to fit listVBox content; outer scroll handles overflow. 
+ content := container.NewBorder( + container.NewVBox(summary, breakdown), + nil, nil, nil, + listVBox, + ) + return peersTabBundle{Content: content, ShowFull: showFull, Refresh: render} +} + +// newPeerRow returns a single expandable row: a clickable header that +// dynamically adds/removes a detail label below it on tap. Expansion +// state is persisted in `expanded` (keyed by pubkey) so Refresh doesn't +// collapse rows the user just opened. Multiple rows can be expanded +// simultaneously (each row owns its own state). Phase 3.7i of #5989. +func newPeerRow(p *proto.PeerState, showFull bool, mu *sync.Mutex, expanded map[string]bool) *fyne.Container { + pubkey := p.GetPubKey() + titleCollapsed := fmt.Sprintf("▶ %s %s %s", peerGlyph(p), peerHostnameShort(p), peerModeTag(p)) + titleExpanded := fmt.Sprintf("▼ %s %s %s", peerGlyph(p), peerHostnameShort(p), peerModeTag(p)) + + mu.Lock() + startExpanded := expanded[pubkey] + mu.Unlock() + + header := widget.NewButton(titleCollapsed, nil) + header.Alignment = widget.ButtonAlignLeading + header.Importance = widget.LowImportance + + box := container.NewVBox(header) + var detail *widget.Label + + addDetail := func() { + detail = widget.NewLabel(buildPeerDetailText(p, showFull)) + detail.Wrapping = fyne.TextWrapWord + detail.TextStyle = fyne.TextStyle{Monospace: true} + box.Add(detail) + header.SetText(titleExpanded) + } + removeDetail := func() { + if detail != nil { + box.Remove(detail) + detail = nil + } + header.SetText(titleCollapsed) + } + + if startExpanded { + addDetail() + } + + header.OnTapped = func() { + mu.Lock() + nowExpanded := !expanded[pubkey] + expanded[pubkey] = nowExpanded + mu.Unlock() + if nowExpanded { + addDetail() + } else { + removeDetail() + } + box.Refresh() + } + return box +} + +func peerGroup(p *proto.PeerState) int { + if !p.GetServerOnline() { + return 3 + } + cs := strings.ToLower(p.GetConnStatus()) + if cs == "connected" && !p.GetRelayed() { + return 0 + } + if cs == "connected" && 
p.GetRelayed() { + return 1 + } + return 2 +} + +func peerGlyph(p *proto.PeerState) string { + switch peerGroup(p) { + case 0: + return "[P2P]" + case 1: + return "[Relay]" + case 2: + return "[Idle]" + default: + return "[Offline]" + } +} + +func peerHostnameShort(p *proto.PeerState) string { + fqdn := p.GetFqdn() + if i := strings.Index(fqdn, "."); i > 0 { + return fqdn[:i] + } + return fqdn +} + +func peerModeTag(p *proto.PeerState) string { + eff, cfg := p.GetEffectiveConnectionMode(), p.GetConfiguredConnectionMode() + if eff == "" { + return "" + } + if cfg != "" && cfg != eff { + return "! " + eff + " (cfg: " + cfg + ")" + } + return eff +} + +// buildPeerDetailText builds the per-peer detail text. Standard fields +// always shown. When `full` is true an additional section with the +// extra technical fields (transfer counters, configured timeouts, etc.) +// is appended. +func buildPeerDetailText(p *proto.PeerState, full bool) string { + var sb strings.Builder + fmt.Fprintf(&sb, "IP: %s\n", p.GetIP()) + fmt.Fprintf(&sb, "FQDN: %s\n", p.GetFqdn()) + connType := p.GetConnStatus() + if p.GetRelayed() { + connType += " (relayed)" + } + fmt.Fprintf(&sb, "Connection type: %s\n", connType) + fmt.Fprintf(&sb, "Effective mode: %s\n", orDashStr(p.GetEffectiveConnectionMode())) + if p.GetEffectiveConnectionMode() != p.GetConfiguredConnectionMode() && p.GetConfiguredConnectionMode() != "" { + fmt.Fprintf(&sb, "Configured mode: %s\n", orDashStr(p.GetConfiguredConnectionMode())) + } + if hs := p.GetLastWireguardHandshake(); hs != nil && hs.IsValid() { + fmt.Fprintf(&sb, "Last handshake: %s\n", hs.AsTime().Format(time.RFC3339)) + } + fmt.Fprintf(&sb, "Latency: %s\n", peerLatencyStr(p)) + if strings.EqualFold(p.GetConnStatus(), "connected") { + if p.GetRelayed() { + fmt.Fprintf(&sb, "Relay server: %s\n", orDashStr(p.GetRelayAddress())) + } else { + fmt.Fprintf(&sb, "Local endpoint: %s\n", orDashStr(p.GetLocalIceCandidateEndpoint())) + fmt.Fprintf(&sb, "Remote endpoint: 
%s\n", orDashStr(p.GetRemoteIceCandidateEndpoint())) + } + } + if ls := p.GetLastSeenAtServer(); ls != nil && ls.IsValid() { + fmt.Fprintf(&sb, "Last seen at srv: %s\n", ls.AsTime().Format(time.RFC3339)) + } + if g := p.GetGroups(); len(g) > 0 { + fmt.Fprintf(&sb, "Groups: %s\n", strings.Join(g, ", ")) + } + + if full { + sb.WriteString("\n--- Full details ---\n") + fmt.Fprintf(&sb, "Public key: %s\n", p.GetPubKey()) + fmt.Fprintf(&sb, "Transfer rx/tx: %s / %s\n", + humanBytes(uint64(p.GetBytesRx())), humanBytes(uint64(p.GetBytesTx()))) + if eff := p.GetEffectiveRelayTimeoutSecs(); eff > 0 { + fmt.Fprintf(&sb, "Relay timeout: %d s (eff)\n", eff) + } + if eff := p.GetEffectiveP2PTimeoutSecs(); eff > 0 { + fmt.Fprintf(&sb, "P2P timeout: %d s (eff)\n", eff) + } + if eff := p.GetEffectiveP2PRetryMaxSecs(); eff > 0 { + fmt.Fprintf(&sb, "P2P retry-max: %d s (eff)\n", eff) + } + if local, remote := p.GetLocalIceCandidateType(), p.GetRemoteIceCandidateType(); local != "" || remote != "" { + fmt.Fprintf(&sb, "ICE candidate L/R: %s / %s\n", orDashStr(local), orDashStr(remote)) + } + if iceFails := p.GetIceBackoffFailures(); iceFails > 0 { + fmt.Fprintf(&sb, "ICE backoff fails: %d\n", iceFails) + } + } + return sb.String() +} + +func orDashStr(s string) string { + if s == "" { + return "-" + } + return s +} + +func peerLatencyStr(p *proto.PeerState) string { + lat := p.GetLatency() + if lat == nil { + return "-" + } + d := lat.AsDuration() + if d == 0 { + return "-" + } + return d.Round(time.Microsecond).String() +} + +func humanBytes(b uint64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := uint64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp]) +} diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index 
36de950e9d7..a1418f7fd60 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -169,6 +169,9 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin return fmt.Errorf("failed to get account zones: %v", err) } + // Phase 3.7i: build once, share read-only across goroutines. + groupNamesByPeerID := grpc.BuildGroupNamesByPeerID(account.Groups) + for _, peer := range account.Peers { if !c.peersUpdateManager.HasChannel(peer.ID) { log.WithContext(ctx).Tracef("peer %s doesn't have a channel, skipping network map update", peer.ID) @@ -203,7 +206,7 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin peerGroups := account.GetPeerGroups(p.ID) start = time.Now() - update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, p, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSetting, maps.Keys(peerGroups), dnsFwdPort) + update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, p, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSetting, maps.Keys(peerGroups), dnsFwdPort, groupNamesByPeerID) c.metrics.CountToSyncResponseDuration(time.Since(start)) c.peersUpdateManager.SendUpdate(ctx, p.ID, &network_map.UpdateMessage{ @@ -324,8 +327,10 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe peerGroups := account.GetPeerGroups(peerId) dnsFwdPort := computeForwarderPort(maps.Values(account.Peers), network_map.DnsForwarderPortMinVersion) + // Phase 3.7i: build group names map for remote-peer annotations. 
+ groupNamesByPeerID := grpc.BuildGroupNamesByPeerID(account.Groups) - update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, peer, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSettings, maps.Keys(peerGroups), dnsFwdPort) + update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, peer, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSettings, maps.Keys(peerGroups), dnsFwdPort, groupNamesByPeerID) c.peersUpdateManager.SendUpdate(ctx, peer.ID, &network_map.UpdateMessage{ Update: update, MessageType: network_map.MessageTypeNetworkMap, diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index f2ab0a2c4df..d55b6a7f38b 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -5,7 +5,6 @@ package server import ( "context" "crypto/tls" - "net/http" "net/netip" "slices" "time" @@ -30,6 +29,7 @@ import ( nbcache "github.com/netbirdio/netbird/management/server/cache" nbContext "github.com/netbirdio/netbird/management/server/context" nbhttp "github.com/netbirdio/netbird/management/server/http" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/netbird/management/server/http/middleware" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/telemetry" @@ -108,9 +108,23 @@ func (s *BaseServer) EventStore() activity.Store { }) } -func (s *BaseServer) APIHandler() http.Handler { - return Create(s, func() http.Handler { - httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), 
s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies, s.RateLimiter()) +// PeerConnStore returns the shared in-memory peer-connection-map store. +// Phase 3.7i of #5989: constructed once, shared between gRPC and HTTP servers. +func (s *BaseServer) PeerConnStore() peer_connections.Store { + return Create(s, func() peer_connections.Store { + return peer_connections.NewMemoryStore(1 * time.Hour) + }) +} + +// PeerConnRouter returns the shared SnapshotRouter. +// Phase 3.7i of #5989: constructed once, shared between gRPC and HTTP servers. +func (s *BaseServer) PeerConnRouter() *peer_connections.SnapshotRouter { + return Create(s, peer_connections.NewSnapshotRouter) +} + +func (s *BaseServer) APIHandler() *nbhttp.APIHandler { + return Create(s, func() *nbhttp.APIHandler { + httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies, s.RateLimiter(), s.PeerConnStore(), s.PeerConnRouter()) if err != nil { log.Fatalf("failed to create API handler: %v", err) } @@ -173,7 +187,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server { } gRPCAPIHandler := grpc.NewServer(gRPCOpts...) 
- srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider(), s.SessionStore()) + srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider(), s.SessionStore(), s.PeerConnStore(), s.PeerConnRouter()) if err != nil { log.Fatalf("failed to create management server: %v", err) } diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index ef417d3cfb5..ad3e22f904b 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -4,11 +4,14 @@ import ( "context" "fmt" "net/url" + "sort" "strings" log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/timestamppb" integrationsConfig "github.com/netbirdio/management-integrations/integrations/config" + "github.com/netbirdio/netbird/shared/connectionmode" "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" @@ -22,6 +25,11 @@ import ( "github.com/netbirdio/netbird/shared/sshauth" ) +// p2pRetryMaxDisabledSentinel is the wire-format value that signals +// "user-explicit disable backoff" (uint32-max). The 0 wire-value is +// reserved for "not set, use daemon default". Phase 3 of #5989. +const p2pRetryMaxDisabledSentinel = ^uint32(0) + func toNetbirdConfig(config *nbconfig.Config, turnCredentials *Token, relayToken *Token, extraSettings *types.ExtraSettings) *proto.NetbirdConfig { if config == nil { return nil @@ -100,12 +108,49 @@ func toPeerConfig(peer *nbpeer.Peer, network *types.Network, dnsName string, set sshConfig.JwtConfig = buildJWTConfig(httpConfig, deviceFlowConfig) } + // Resolve the effective ConnectionMode for this peer. 
+ // Phase 1: account-wide settings only (per-peer / per-group resolution + // follows in Phase 3 / issue #5990). The new ConnectionMode field wins + // over the legacy LazyConnectionEnabled boolean. UNSPECIFIED in Settings + // (i.e. ConnectionMode == nil) falls back to the legacy bool. + resolvedMode := connectionmode.ResolveLegacyLazyBool(settings.LazyConnectionEnabled) + if settings.ConnectionMode != nil { + if m, err := connectionmode.ParseString(*settings.ConnectionMode); err == nil && m != connectionmode.ModeUnspecified { + resolvedMode = m + } + } + + relayTO := uint32(0) + if settings.RelayTimeoutSeconds != nil { + relayTO = *settings.RelayTimeoutSeconds + } + p2pTO := uint32(0) + if settings.P2pTimeoutSeconds != nil { + p2pTO = *settings.P2pTimeoutSeconds + } + p2pRetryMax := uint32(0) + if settings.P2pRetryMaxSeconds != nil { + if *settings.P2pRetryMaxSeconds == 0 { + p2pRetryMax = p2pRetryMaxDisabledSentinel + } else { + p2pRetryMax = *settings.P2pRetryMaxSeconds + } + } + return &proto.PeerConfig{ Address: fmt.Sprintf("%s/%d", peer.IP.String(), netmask), SshConfig: sshConfig, Fqdn: fqdn, RoutingPeerDnsResolutionEnabled: settings.RoutingPeerDNSResolutionEnabled, - LazyConnectionEnabled: settings.LazyConnectionEnabled, + // Send BOTH the new enum (for new clients) and the legacy boolean + // (for old clients). New clients prefer the explicit enum and + // ignore the bool; old clients ignore the unknown enum field + // (proto3 default behaviour) and fall back to the bool. 
+ LazyConnectionEnabled: resolvedMode.ToLazyConnectionEnabled(), + ConnectionMode: resolvedMode.ToProto(), + P2PTimeoutSeconds: p2pTO, + P2PRetryMaxSeconds: p2pRetryMax, + RelayTimeoutSeconds: relayTO, AutoUpdate: &proto.AutoUpdateSettings{ Version: settings.AutoUpdateVersion, AlwaysUpdate: settings.AutoUpdateAlways, @@ -113,7 +158,7 @@ func toPeerConfig(peer *nbpeer.Peer, network *types.Network, dnsName string, set } } -func ToSyncResponse(ctx context.Context, config *nbconfig.Config, httpConfig *nbconfig.HttpServerConfig, deviceFlowConfig *nbconfig.DeviceAuthorizationFlow, peer *nbpeer.Peer, turnCredentials *Token, relayCredentials *Token, networkMap *types.NetworkMap, dnsName string, checks []*posture.Checks, dnsCache *cache.DNSConfigCache, settings *types.Settings, extraSettings *types.ExtraSettings, peerGroups []string, dnsFwdPort int64) *proto.SyncResponse { +func ToSyncResponse(ctx context.Context, config *nbconfig.Config, httpConfig *nbconfig.HttpServerConfig, deviceFlowConfig *nbconfig.DeviceAuthorizationFlow, peer *nbpeer.Peer, turnCredentials *Token, relayCredentials *Token, networkMap *types.NetworkMap, dnsName string, checks []*posture.Checks, dnsCache *cache.DNSConfigCache, settings *types.Settings, extraSettings *types.ExtraSettings, peerGroups []string, dnsFwdPort int64, groupNamesByPeerID map[string][]string) *proto.SyncResponse { response := &proto.SyncResponse{ PeerConfig: toPeerConfig(peer, networkMap.Network, dnsName, settings, httpConfig, deviceFlowConfig, networkMap.EnableSSH), NetworkMap: &proto.NetworkMap{ @@ -131,14 +176,20 @@ func ToSyncResponse(ctx context.Context, config *nbconfig.Config, httpConfig *nb response.NetworkMap.PeerConfig = response.PeerConfig + appendCtx := AppendRemotePeerConfigContext{ + DNSDomain: dnsName, + Cfg: settings, + GroupNamesByPeerID: groupNamesByPeerID, + } + remotePeers := make([]*proto.RemotePeerConfig, 0, len(networkMap.Peers)+len(networkMap.OfflinePeers)) - remotePeers = appendRemotePeerConfig(remotePeers, 
networkMap.Peers, dnsName) + remotePeers = appendRemotePeerConfig(remotePeers, networkMap.Peers, appendCtx) response.RemotePeers = remotePeers response.NetworkMap.RemotePeers = remotePeers response.RemotePeersIsEmpty = len(remotePeers) == 0 response.NetworkMap.RemotePeersIsEmpty = response.RemotePeersIsEmpty - response.NetworkMap.OfflinePeers = appendRemotePeerConfig(nil, networkMap.OfflinePeers, dnsName) + response.NetworkMap.OfflinePeers = appendRemotePeerConfig(nil, networkMap.OfflinePeers, appendCtx) firewallRules := toProtocolFirewallRules(networkMap.FirewallRules) response.NetworkMap.FirewallRules = firewallRules @@ -195,19 +246,105 @@ func buildAuthorizedUsersProto(ctx context.Context, authorizedUsers map[string]m return hashedUsers, machineUsers } -func appendRemotePeerConfig(dst []*proto.RemotePeerConfig, peers []*nbpeer.Peer, dnsName string) []*proto.RemotePeerConfig { +// AppendRemotePeerConfigContext bundles per-account settings + per-peer +// group lookups so appendRemotePeerConfig stays free of DB calls. +// Callers (in conversion.go) materialise this once per NetworkMap build. +type AppendRemotePeerConfigContext struct { + DNSDomain string + // Cfg is the account-wide configured mode/timeouts. Nil when unavailable. + Cfg *types.Settings + // GroupNamesByPeerID maps a peer ID to its sorted group-name list. 
+ GroupNamesByPeerID map[string][]string +} + +func appendRemotePeerConfig(dst []*proto.RemotePeerConfig, peers []*nbpeer.Peer, c AppendRemotePeerConfigContext) []*proto.RemotePeerConfig { + var cfgConnMode string + var cfgRelayTO, cfgP2pTO, cfgP2pRetryMax uint32 + if c.Cfg != nil { + cfgConnMode = derefStringOrEmpty(c.Cfg.ConnectionMode) + cfgRelayTO = derefUint32OrZero(c.Cfg.RelayTimeoutSeconds) + cfgP2pTO = derefUint32OrZero(c.Cfg.P2pTimeoutSeconds) + cfgP2pRetryMax = derefUint32OrZero(c.Cfg.P2pRetryMaxSeconds) + } + for _, rPeer := range peers { - dst = append(dst, &proto.RemotePeerConfig{ - WgPubKey: rPeer.Key, - AllowedIps: []string{rPeer.IP.String() + "/32"}, - SshConfig: &proto.SSHConfig{SshPubKey: []byte(rPeer.SSHKey)}, - Fqdn: rPeer.FQDN(dnsName), + cfg := &proto.RemotePeerConfig{ + WgPubKey: rPeer.Key, + AllowedIps: []string{rPeer.IP.String() + "/32"}, + SshConfig: &proto.SSHConfig{SshPubKey: []byte(rPeer.SSHKey)}, + Fqdn: rPeer.FQDN(c.DNSDomain), + AgentVersion: rPeer.Meta.WtVersion, - }) + + // Phase 3.7i: effective values from the peer's last self-report. + EffectiveConnectionMode: rPeer.Meta.EffectiveConnectionMode, + EffectiveRelayTimeoutSecs: rPeer.Meta.EffectiveRelayTimeoutSecs, + EffectiveP2PTimeoutSecs: rPeer.Meta.EffectiveP2PTimeoutSecs, + EffectiveP2PRetryMaxSecs: rPeer.Meta.EffectiveP2PRetryMaxSecs, + + // Phase 3.7i: account-wide configured values from Settings. + ConfiguredConnectionMode: cfgConnMode, + ConfiguredRelayTimeoutSecs: cfgRelayTO, + ConfiguredP2PTimeoutSecs: cfgP2pTO, + ConfiguredP2PRetryMaxSecs: cfgP2pRetryMax, + + // Phase 3.7i: server-knowledge fields surfaced to UIs. + Groups: c.GroupNamesByPeerID[rPeer.ID], + } + // nbpeer.Peer.Status is *PeerStatus; nil-guard before accessing. 
+ if rPeer.Status != nil { + if !rPeer.Status.LastSeen.IsZero() { + cfg.LastSeenAtServer = timestamppb.New(rPeer.Status.LastSeen) + } + cfg.LiveOnline = rPeer.Status.Connected + } + // New servers always know per-peer liveness; signal that to new + // clients so they can trust LiveOnline directly instead of + // guessing from the LastSeenAtServer-zero heuristic. Old servers + // leave this field at default (false) and clients fall back. + cfg.ServerLivenessKnown = true + dst = append(dst, cfg) } return dst } +// derefStringOrEmpty returns the pointed-to string or "" for nil. +// Used for *string Settings fields where "" means "account hasn't +// configured a mode; UI shows it as unset". +func derefStringOrEmpty(s *string) string { + if s == nil { + return "" + } + return *s +} + +// derefUint32OrZero returns the pointed-to uint32 or 0 for nil. +// Used for *uint32 Settings fields where 0 means "account hasn't set +// an override; daemon falls back to its built-in default". +func derefUint32OrZero(u *uint32) uint32 { + if u == nil { + return 0 + } + return *u +} + +// BuildGroupNamesByPeerID constructs a peerID → sorted-group-names map +// from the account's Groups in a single pass. Callers pass this to +// ToSyncResponse so that appendRemotePeerConfig can annotate each +// RemotePeerConfig.Groups without any additional DB calls. 
+func BuildGroupNamesByPeerID(groups map[string]*types.Group) map[string][]string { + result := make(map[string][]string, len(groups)) + for _, g := range groups { + for _, peerID := range g.Peers { + result[peerID] = append(result[peerID], g.Name) + } + } + for peerID := range result { + sort.Strings(result[peerID]) + } + return result +} + // toProtocolDNSConfig converts nbdns.Config to proto.DNSConfig using the cache func toProtocolDNSConfig(update nbdns.Config, cache *cache.DNSConfigCache, forwardPort int64) *proto.DNSConfig { protoUpdate := &proto.DNSConfig{ diff --git a/management/internals/shared/grpc/conversion_test.go b/management/internals/shared/grpc/conversion_test.go index 1e75caf959a..961bea0210e 100644 --- a/management/internals/shared/grpc/conversion_test.go +++ b/management/internals/shared/grpc/conversion_test.go @@ -2,6 +2,7 @@ package grpc import ( "fmt" + "net" "net/netip" "reflect" "testing" @@ -12,8 +13,172 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller/cache" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/types" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) +// TestToPeerConfig_ConnectionModeResolution covers Phase 1 of issue #5989: +// the management server resolves the effective ConnectionMode from +// Settings (with the new ConnectionMode field winning over the legacy +// LazyConnectionEnabled boolean), then writes BOTH wire fields so old +// clients (boolean only) and new clients (enum only) see consistent +// behaviour. 
+func TestToPeerConfig_ConnectionModeResolution(t *testing.T) { + cases := []struct { + name string + settingsMode *string + settingsLazyBool bool + settingsRelayTO *uint32 + settingsP2pTO *uint32 + wantPCMode mgmProto.ConnectionMode + wantPCLazyBool bool + wantPCRelayTO uint32 + wantPCP2pTO uint32 + }{ + { + name: "no settings -> P2P + lazy=false", + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + wantPCLazyBool: false, + }, + { + name: "only legacy lazy=true -> P2P_LAZY + lazy=true", + settingsLazyBool: true, + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + { + name: "ConnectionMode=p2p-lazy explicit -> P2P_LAZY + lazy=true", + settingsMode: strPtrTest("p2p-lazy"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + { + name: "ConnectionMode=p2p explicit -> P2P + lazy=false", + settingsMode: strPtrTest("p2p"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P, + wantPCLazyBool: false, + }, + { + name: "ConnectionMode=relay-forced -> RELAY_FORCED + lazy=false (structural compat gap)", + settingsMode: strPtrTest("relay-forced"), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + wantPCLazyBool: false, + }, + { + name: "ConnectionMode wins over conflicting legacy bool", + settingsMode: strPtrTest("relay-forced"), + settingsLazyBool: true, // ignored + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, + wantPCLazyBool: false, + }, + { + name: "RelayTimeout propagates", + settingsMode: strPtrTest("p2p-lazy"), + settingsRelayTO: u32PtrTest(42), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + wantPCRelayTO: 42, + }, + { + name: "P2pTimeout propagates", + settingsMode: strPtrTest("p2p-dynamic"), + settingsP2pTO: u32PtrTest(180), + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, + wantPCLazyBool: false, // p2p-dynamic maps to lazy=false (best-match for old clients) + 
wantPCP2pTO: 180, + }, + { + name: "Garbage in ConnectionMode falls back to legacy bool", + settingsMode: strPtrTest("not-a-mode"), + settingsLazyBool: true, + wantPCMode: mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, + wantPCLazyBool: true, + }, + } + + // Minimal Network and Peer fixtures shared across cases. + _, ipnet, _ := net.ParseCIDR("10.0.0.0/16") + network := &types.Network{Net: *ipnet} + peer := &nbpeer.Peer{ + ID: "p1", + Name: "test-peer", + DNSLabel: "test-peer", + IP: net.IPv4(10, 0, 0, 5), + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + settings := &types.Settings{ + LazyConnectionEnabled: c.settingsLazyBool, + ConnectionMode: c.settingsMode, + RelayTimeoutSeconds: c.settingsRelayTO, + P2pTimeoutSeconds: c.settingsP2pTO, + } + pc := toPeerConfig(peer, network, "example.local", settings, nil, nil, false) + + assert.Equal(t, c.wantPCMode, pc.GetConnectionMode(), + "ConnectionMode wire field") + assert.Equal(t, c.wantPCLazyBool, pc.GetLazyConnectionEnabled(), + "LazyConnectionEnabled wire field (backwards-compat)") + assert.Equal(t, c.wantPCRelayTO, pc.GetRelayTimeoutSeconds(), + "RelayTimeoutSeconds wire field") + assert.Equal(t, c.wantPCP2pTO, pc.GetP2PTimeoutSeconds(), + "P2PTimeoutSeconds wire field") + }) + } +} + +func strPtrTest(s string) *string { return &s } +func u32PtrTest(v uint32) *uint32 { return &v } + +// toPeerConfigForTest is a minimal helper that calls toPeerConfig with a +// fixed peer and network fixture, forwarding only the settings argument. +// Used by the P2pRetryMaxSeconds sentinel tests (Phase 3 / #5989). 
+func toPeerConfigForTest(settings *types.Settings) *mgmProto.PeerConfig { + _, ipnet, _ := net.ParseCIDR("10.0.0.0/16") + network := &types.Network{Net: *ipnet} + peer := &nbpeer.Peer{ + ID: "p1", + Name: "test-peer", + DNSLabel: "test-peer", + IP: net.IPv4(10, 0, 0, 5), + } + return toPeerConfig(peer, network, "example.local", settings, nil, nil, false) +} + +func TestToPeerConfig_P2pRetryMax_NullDB(t *testing.T) { + settings := &types.Settings{ + P2pRetryMaxSeconds: nil, // DB has NULL + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != 0 { + t.Errorf("NULL in DB should produce 0 on the wire (= use daemon default), got %d", pc.P2PRetryMaxSeconds) + } +} + +func TestToPeerConfig_P2pRetryMax_ExplicitDisable(t *testing.T) { + zero := uint32(0) + settings := &types.Settings{ + P2pRetryMaxSeconds: &zero, // user explicitly set 0 + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != ^uint32(0) { + t.Errorf("explicit 0 should map to uint32-max sentinel on the wire, got %d", pc.P2PRetryMaxSeconds) + } +} + +func TestToPeerConfig_P2pRetryMax_NormalValue(t *testing.T) { + v := uint32(600) + settings := &types.Settings{ + P2pRetryMaxSeconds: &v, + } + pc := toPeerConfigForTest(settings) + if pc.P2PRetryMaxSeconds != 600 { + t.Errorf("expected 600 on the wire, got %d", pc.P2PRetryMaxSeconds) + } +} + func TestToProtocolDNSConfigWithCache(t *testing.T) { var cache cache.DNSConfigCache diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 0c1611e7f61..1b3adf5ea52 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -31,6 +31,7 @@ import ( nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/job" + "github.com/netbirdio/netbird/management/server/peer_connections" 
"github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/store" @@ -86,6 +87,10 @@ type Server struct { reverseProxyManager rpservice.Manager reverseProxyMu sync.RWMutex + + // Phase 3.7i of #5989: shared peer-connection-map state + peerConnections peer_connections.Store + snapshotRouter *peer_connections.SnapshotRouter } // NewServer creates a new Management server @@ -101,7 +106,19 @@ func NewServer( networkMapController network_map.Controller, oAuthConfigProvider idp.OAuthConfigProvider, sessionStore *auth.SessionStore, + peerConnStore peer_connections.Store, + peerConnRouter *peer_connections.SnapshotRouter, ) (*Server, error) { + // Defensive defaults for Phase 3.7i wiring: production callers pass + // non-nil values built by the BaseServer; some test fixtures pass + // nil. Without these the Sync handler nil-derefs in Register(). + if peerConnStore == nil { + peerConnStore = peer_connections.NewMemoryStore(5 * time.Minute) + } + if peerConnRouter == nil { + peerConnRouter = peer_connections.NewSnapshotRouter() + } + if appMetrics != nil { // update gauge based on number of connected peers which is equal to open gRPC streams err := appMetrics.GRPCMetrics().RegisterConnectedStreams(func() int64 { @@ -149,6 +166,9 @@ func NewServer( syncLim: syncLim, syncLimEnabled: syncLimEnabled, + + peerConnections: peerConnStore, + snapshotRouter: peerConnRouter, }, nil } @@ -422,6 +442,10 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg debouncer := NewUpdateDebouncer(1000 * time.Millisecond) defer debouncer.Stop() + // Phase 3.7i (#5989): register for SnapshotRequest dispatch. 
+ snapshotCh := s.snapshotRouter.Register(peerKey.String()) + defer s.snapshotRouter.Unregister(peerKey.String(), snapshotCh) + for { select { // condition when there are some updates @@ -466,6 +490,22 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg log.WithContext(ctx).Debugf("stream of peer %s has been closed", peerKey.String()) s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return srv.Context().Err() + + // Phase 3.7i (#5989): NEW case — on-demand snapshot request. + // Bypasses the debouncer because dashboard refresh has a + // <3 s end-to-end latency budget. Direct sendUpdate. + case nonce, ok := <-snapshotCh: + if !ok { + continue + } + snapMsg := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{ + SnapshotRequest: &proto.PeerSnapshotRequest{Nonce: nonce}, + }, + } + if err := s.sendUpdate(ctx, accountID, peerKey, peer, snapMsg, srv, streamStartTime); err != nil { + log.WithContext(ctx).Warnf("send snapshot request to %s: %v", peerKey.String(), err) + } } } } @@ -681,7 +721,11 @@ func extractPeerMeta(ctx context.Context, meta *proto.PeerSystemMeta) nbpeer.Pee BlockInbound: meta.GetFlags().GetBlockInbound(), LazyConnectionEnabled: meta.GetFlags().GetLazyConnectionEnabled(), }, - Files: files, + Files: files, + EffectiveConnectionMode: meta.GetEffectiveConnectionMode(), + EffectiveRelayTimeoutSecs: meta.GetEffectiveRelayTimeoutSecs(), + EffectiveP2PTimeoutSecs: meta.GetEffectiveP2PTimeoutSecs(), + EffectiveP2PRetryMaxSecs: meta.GetEffectiveP2PRetryMaxSecs(), } } @@ -921,7 +965,19 @@ func (s *Server) sendInitialSync(ctx context.Context, peerKey wgtypes.Key, peer return status.Errorf(codes.Internal, "failed to get peer groups %s", err) } - plainResp := ToSyncResponse(ctx, s.config, s.config.HttpConfig, s.config.DeviceAuthorizationFlow, peer, turnToken, relayToken, networkMap, s.networkMapController.GetDNSDomain(settings), postureChecks, nil, settings, settings.Extra, peerGroups, dnsFwdPort) + // Phase 
3.7i: build group-names map for RemotePeerConfig annotations. + accountGroups, err := s.accountManager.GetStore().GetAccountGroups(ctx, store.LockingStrengthNone, peer.AccountID) + if err != nil { + log.WithContext(ctx).Warnf("failed to get account groups for peer %s: %v", peer.ID, err) + accountGroups = nil + } + groupsMap := make(map[string]*types.Group, len(accountGroups)) + for _, g := range accountGroups { + groupsMap[g.ID] = g + } + groupNamesByPeerID := BuildGroupNamesByPeerID(groupsMap) + + plainResp := ToSyncResponse(ctx, s.config, s.config.HttpConfig, s.config.DeviceAuthorizationFlow, peer, turnToken, relayToken, networkMap, s.networkMapController.GetDNSDomain(settings), postureChecks, nil, settings, settings.Extra, peerGroups, dnsFwdPort, groupNamesByPeerID) key, err := s.secretsManager.GetWGKey() if err != nil { @@ -1122,6 +1178,22 @@ func (s *Server) SyncMeta(ctx context.Context, req *proto.EncryptedMessage) (*pr return &proto.Empty{}, nil } +// SyncPeerConnections receives a per-peer connection map from a peer. +// Phase 3.7i of #5989. Mirrors SyncMeta's parseRequest pattern: +// decrypts the EncryptedMessage envelope, authenticates the peer pubkey, +// stores the decoded PeerConnectionMap under that pubkey. 
+func (s *Server) SyncPeerConnections(ctx context.Context, req *proto.EncryptedMessage) (*proto.Empty, error) { + pcm := &proto.PeerConnectionMap{} + peerKey, err := s.parseRequest(ctx, req, pcm) + if err != nil { + return nil, err + } + if s.peerConnections != nil { + s.peerConnections.Put(peerKey.String(), pcm) + } + return &proto.Empty{}, nil +} + func (s *Server) Logout(ctx context.Context, req *proto.EncryptedMessage) (*proto.Empty, error) { log.WithContext(ctx).Debugf("Logout request from peer [%s]", req.WgPubKey) start := time.Now() diff --git a/management/server/account.go b/management/server/account.go index 4b71ab486eb..7ba5e709708 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -371,6 +371,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco am.handleRoutingPeerDNSResolutionSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleLazyConnectionSettings(ctx, oldSettings, newSettings, userID, accountID) + am.handleConnectionModeSettings(ctx, oldSettings, newSettings, userID, accountID) am.handlePeerLoginExpirationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleGroupsPropagationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleAutoUpdateVersionSettings(ctx, oldSettings, newSettings, userID, accountID) @@ -455,6 +456,72 @@ func (am *DefaultAccountManager) handleLazyConnectionSettings(ctx context.Contex } } +// handleConnectionModeSettings emits one audit event per changed Phase-1 +// connection-mode setting (mode, relay timeout, p2p timeout). Each event +// carries old/new values in the meta payload so administrators can audit +// the full transition. NULL transitions show as empty string / 0 in the +// meta — chosen over a sentinel so the frontend can render uniformly. 
+func (am *DefaultAccountManager) handleConnectionModeSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { + if !equalStringPtr(oldSettings.ConnectionMode, newSettings.ConnectionMode) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountConnectionModeChanged, map[string]any{ + "old": derefStringPtr(oldSettings.ConnectionMode), + "new": derefStringPtr(newSettings.ConnectionMode), + }) + } + if !equalUint32Ptr(oldSettings.RelayTimeoutSeconds, newSettings.RelayTimeoutSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountRelayTimeoutChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.RelayTimeoutSeconds), + "new": derefUint32Ptr(newSettings.RelayTimeoutSeconds), + }) + } + if !equalUint32Ptr(oldSettings.P2pTimeoutSeconds, newSettings.P2pTimeoutSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountP2pTimeoutChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.P2pTimeoutSeconds), + "new": derefUint32Ptr(newSettings.P2pTimeoutSeconds), + }) + } + if !equalUint32Ptr(oldSettings.P2pRetryMaxSeconds, newSettings.P2pRetryMaxSeconds) { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountP2pRetryMaxChanged, map[string]any{ + "old": derefUint32Ptr(oldSettings.P2pRetryMaxSeconds), + "new": derefUint32Ptr(newSettings.P2pRetryMaxSeconds), + }) + } +} + +func equalStringPtr(a, b *string) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func equalUint32Ptr(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func derefStringPtr(p *string) string { + if p == nil { + return "" + } + return *p +} + +func derefUint32Ptr(p *uint32) uint32 { + if p == nil { + return 0 + } + return *p +} + func (am *DefaultAccountManager) handlePeerLoginExpirationSettings(ctx context.Context, oldSettings, 
newSettings *types.Settings, userID, accountID string) { if oldSettings.PeerLoginExpirationEnabled != newSettings.PeerLoginExpirationEnabled { event := activity.AccountPeerLoginExpirationEnabled diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 626ed222dfb..7c779dbe49f 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -104,6 +104,10 @@ type Manager interface { GetDNSSettings(ctx context.Context, accountID string, userID string) (*types.DNSSettings, error) SaveDNSSettings(ctx context.Context, accountID string, userID string, dnsSettingsToSave *types.DNSSettings) error GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) + // GetPeerByPubKey returns the peer with the given WireGuard public key from + // the given account. Phase 3.7i of #5989 — used by REST handlers to enrich + // PeerConnectionMap entries with FQDNs. + GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) UpdateAccountSettings(ctx context.Context, accountID, userID string, newSettings *types.Settings) (*types.Settings, error) UpdateAccountOnboarding(ctx context.Context, accountID, userID string, newOnboarding *types.AccountOnboarding) (*types.AccountOnboarding, error) LoginPeer(ctx context.Context, login types.PeerLogin) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) // used by peer gRPC API diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go index 8f3b22eccff..5f2721ff58e 100644 --- a/management/server/account/manager_mock.go +++ b/management/server/account/manager_mock.go @@ -900,6 +900,21 @@ func (mr *MockManagerMockRecorder) GetPeer(ctx, accountID, peerID, userID interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeer", reflect.TypeOf((*MockManager)(nil).GetPeer), ctx, accountID, peerID, userID) } +// GetPeerByPubKey mocks base method. Phase 3.7i of #5989. 
+func (m *MockManager) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerByPubKey", ctx, accountID, pubKey) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerByPubKey indicates an expected call of GetPeerByPubKey. +func (mr *MockManagerMockRecorder) GetPeerByPubKey(ctx, accountID, pubKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerByPubKey", reflect.TypeOf((*MockManager)(nil).GetPeerByPubKey), ctx, accountID, pubKey) +} + // GetPeerGroups mocks base method. func (m *MockManager) GetPeerGroups(ctx context.Context, accountID, peerID string) ([]*types.Group, error) { m.ctrl.T.Helper() diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index ddc3e00c38d..8b09a74b182 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -232,6 +232,19 @@ const ( // DomainValidated indicates that a custom domain was validated DomainValidated Activity = 120 + // AccountConnectionModeChanged indicates the account-wide ConnectionMode + // setting was changed (Phase 1 of issue #5989). + AccountConnectionModeChanged Activity = 121 + // AccountRelayTimeoutChanged indicates the account-wide RelayTimeoutSeconds + // setting was changed. + AccountRelayTimeoutChanged Activity = 122 + // AccountP2pTimeoutChanged indicates the account-wide P2pTimeoutSeconds + // setting was changed. + AccountP2pTimeoutChanged Activity = 123 + // AccountP2pRetryMaxChanged indicates the account-wide P2pRetryMaxSeconds + // setting was modified (Phase 3 of #5989). 
+ AccountP2pRetryMaxChanged Activity = 124 + AccountDeleted Activity = 99999 ) @@ -335,6 +348,11 @@ var activityMap = map[Activity]Code{ AccountLazyConnectionEnabled: {"Account lazy connection enabled", "account.setting.lazy.connection.enable"}, AccountLazyConnectionDisabled: {"Account lazy connection disabled", "account.setting.lazy.connection.disable"}, + AccountConnectionModeChanged: {"Account connection mode changed", "account.setting.connection_mode.change"}, + AccountRelayTimeoutChanged: {"Account relay timeout changed", "account.setting.relay_timeout.change"}, + AccountP2pTimeoutChanged: {"Account p2p timeout changed", "account.setting.p2p_timeout.change"}, + AccountP2pRetryMaxChanged: {"Account p2p retry max changed", "account.setting.p2p_retry_max.change"}, + AccountNetworkRangeUpdated: {"Account network range updated", "account.network.range.update"}, PeerIPUpdated: {"Peer IP updated", "peer.ip.update"}, diff --git a/management/server/http/handler.go b/management/server/http/handler.go index b9ea605d36f..0c596238732 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -20,6 +20,7 @@ import ( nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" idpmanager "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/management-integrations/integrations" @@ -47,6 +48,7 @@ import ( "github.com/netbirdio/netbird/management/server/http/handlers/idp" "github.com/netbirdio/netbird/management/server/http/handlers/instance" "github.com/netbirdio/netbird/management/server/http/handlers/networks" + peer_connections_http "github.com/netbirdio/netbird/management/server/http/handlers/peer_connections" "github.com/netbirdio/netbird/management/server/http/handlers/peers" "github.com/netbirdio/netbird/management/server/http/handlers/policies" "github.com/netbirdio/netbird/management/server/http/handlers/routes" @@ -59,13 +61,25 @@ import 
( nbnetworks "github.com/netbirdio/netbird/management/server/networks" "github.com/netbirdio/netbird/management/server/networks/resources" "github.com/netbirdio/netbird/management/server/networks/routers" + nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/telemetry" ) const apiPrefix = "/api" +// APIHandler wraps the HTTP router and holds shared state for all HTTP handlers. +// The peerConnections and snapshotRouter fields are constructed once in boot.go +// and shared with the gRPC server so both sides see the same in-memory state. +// Phase 3.7i of #5989; HTTP routes that consume these are registered in Task 4.2. +type APIHandler struct { + http.Handler + + peerConnections peer_connections.Store + snapshotRouter *peer_connections.SnapshotRouter +} + // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. -func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix, rateLimiter *middleware.APIRateLimiter) (http.Handler, error) { +func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager 
routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix, rateLimiter *middleware.APIRateLimiter, peerConnStore peer_connections.Store, peerConnRouter *peer_connections.SnapshotRouter) (*APIHandler, error) { // Register bypass paths for unauthenticated endpoints if err := bypass.AddBypassPath("/api/instance"); err != nil { @@ -124,6 +138,16 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks accounts.AddEndpoints(accountManager, settingsManager, router) peers.AddEndpoints(accountManager, router, networkMapController, permissionsManager) + + // Phase 3.7i of #5989: peer connection-map REST routes. 
+ peerConnHandler := peer_connections_http.NewHandler( + peerConnStore, + &pcAccountManagerAdapter{am: accountManager, nmc: networkMapController}, + peerConnRouter, + ) + router.HandleFunc("/peers/{peerId}/connections", peerConnHandler.GetPeerConnections).Methods("GET", "OPTIONS") + router.HandleFunc("/peers/{peerId}/connections/refresh", peerConnHandler.PostRefresh).Methods("POST", "OPTIONS") + users.AddEndpoints(accountManager, router) users.AddInvitesEndpoints(accountManager, router) users.AddPublicInvitesEndpoints(accountManager, router) @@ -155,5 +179,42 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks rootRouter.PathPrefix("/oauth2").Handler(corsMiddleware.Handler(embeddedIdP.Handler())) } - return rootRouter, nil + return &APIHandler{ + Handler: rootRouter, + peerConnections: peerConnStore, + snapshotRouter: peerConnRouter, + }, nil +} + +// pcAccountManagerAdapter bridges the real account.Manager into the small +// interface peer_connections.Handler uses. Phase 3.7i of #5989. +type pcAccountManagerAdapter struct { + am account.Manager + nmc network_map.Controller +} + +func (a *pcAccountManagerAdapter) GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) { + return a.am.GetPeer(ctx, accountID, peerID, userID) +} + +func (a *pcAccountManagerAdapter) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) { + return a.am.GetPeerByPubKey(ctx, accountID, pubKey) +} + +// GetDNSDomain resolves the configured DNS domain for the account. +// It reads the account settings and delegates to the networkMapController +// which applies the global default when the account has no custom domain. +// Falls back to "" on error — FQDN enrichment in the handler is best-effort. 
+func (a *pcAccountManagerAdapter) GetDNSDomain(ctx context.Context, accountID string) string { + settings, err := a.am.GetAccountSettings(ctx, accountID, "internal") + if err != nil { + return "" + } + if a.nmc == nil { + if settings != nil { + return settings.DNSDomain + } + return "" + } + return a.nmc.GetDNSDomain(settings) } diff --git a/management/server/http/handlers/accounts/accounts_handler.go b/management/server/http/handlers/accounts/accounts_handler.go index cc5567e3db6..f4c512ffb5e 100644 --- a/management/server/http/handlers/accounts/accounts_handler.go +++ b/management/server/http/handlers/accounts/accounts_handler.go @@ -215,6 +215,29 @@ func (h *handler) updateAccountRequestSettings(req api.PutApiAccountsAccountIdJS if req.Settings.LazyConnectionEnabled != nil { returnSettings.LazyConnectionEnabled = *req.Settings.LazyConnectionEnabled } + if req.Settings.ConnectionMode != nil { + modeStr := string(*req.Settings.ConnectionMode) + if !req.Settings.ConnectionMode.Valid() { + return nil, fmt.Errorf("invalid connection_mode %q", modeStr) + } + // Persist as the canonical string. NOTE(review): a JSON null lands + // here as a nil pointer and skips this whole block, leaving the + // existing value untouched — so null does NOT clear an override; + // confirm how clients are expected to clear it. 
+ s := modeStr + returnSettings.ConnectionMode = &s + } + if req.Settings.P2pTimeoutSeconds != nil { + v := uint32(*req.Settings.P2pTimeoutSeconds) + returnSettings.P2pTimeoutSeconds = &v + } + if req.Settings.P2pRetryMaxSeconds != nil { + v := uint32(*req.Settings.P2pRetryMaxSeconds) + returnSettings.P2pRetryMaxSeconds = &v + } + if req.Settings.RelayTimeoutSeconds != nil { + v := uint32(*req.Settings.RelayTimeoutSeconds) + returnSettings.RelayTimeoutSeconds = &v + } if req.Settings.AutoUpdateVersion != nil { _, err := goversion.NewSemver(*req.Settings.AutoUpdateVersion) if *req.Settings.AutoUpdateVersion == autoUpdateLatestVersion || @@ -349,6 +372,34 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A PeerExposeEnabled: settings.PeerExposeEnabled, PeerExposeGroups: settings.PeerExposeGroups, LazyConnectionEnabled: &settings.LazyConnectionEnabled, + ConnectionMode: func() *api.AccountSettingsConnectionMode { + if settings.ConnectionMode == nil { + return nil + } + v := api.AccountSettingsConnectionMode(*settings.ConnectionMode) + return &v + }(), + P2pTimeoutSeconds: func() *int64 { + if settings.P2pTimeoutSeconds == nil { + return nil + } + v := int64(*settings.P2pTimeoutSeconds) + return &v + }(), + P2pRetryMaxSeconds: func() *int64 { + if settings.P2pRetryMaxSeconds == nil { + return nil + } + v := int64(*settings.P2pRetryMaxSeconds) + return &v + }(), + RelayTimeoutSeconds: func() *int64 { + if settings.RelayTimeoutSeconds == nil { + return nil + } + v := int64(*settings.RelayTimeoutSeconds) + return &v + }(), DnsDomain: &settings.DNSDomain, AutoUpdateVersion: &settings.AutoUpdateVersion, AutoUpdateAlways: &settings.AutoUpdateAlways, diff --git a/management/server/http/handlers/accounts/accounts_handler_test.go b/management/server/http/handlers/accounts/accounts_handler_test.go index 739dfe2f655..fc61ada712b 100644 --- a/management/server/http/handlers/accounts/accounts_handler_test.go +++ 
b/management/server/http/handlers/accounts/accounts_handler_test.go @@ -336,3 +336,79 @@ func TestAccounts_AccountsHandler(t *testing.T) { }) } } + +func TestAccountsHandler_PutSettings_P2pRetryMax(t *testing.T) { + accountID := "test_account" + adminUser := types.NewAdminUser("test_user") + + sr := func(v string) *string { return &v } + br := func(v bool) *bool { return &v } + ir := func(v int64) *int64 { return &v } + + handler := initAccountsTestData(t, &types.Account{ + Id: accountID, + Domain: "hotmail.com", + Network: types.NewNetwork(), + Users: map[string]*types.User{ + adminUser.Id: adminUser, + }, + Settings: &types.Settings{ + PeerLoginExpirationEnabled: false, + PeerLoginExpiration: time.Hour, + RegularUsersViewBlocked: false, + }, + }) + + recorder := httptest.NewRecorder() + req := httptest.NewRequest( + http.MethodPut, + "/api/accounts/"+accountID, + bytes.NewBufferString(`{"settings": {"peer_login_expiration": 3600, "peer_login_expiration_enabled": false, "p2p_retry_max_seconds": 600}, "onboarding": {"onboarding_flow_pending": true, "signup_form_pending": true}}`), + ) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: adminUser.Id, + AccountId: accountID, + Domain: "hotmail.com", + }) + + router := mux.NewRouter() + router.HandleFunc("/api/accounts/{accountId}", handler.updateAccount).Methods("PUT") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + if status := recorder.Code; status != http.StatusOK { + t.Fatalf("handler returned wrong status code: got %v want %v", status, http.StatusOK) + } + + content, err := io.ReadAll(res.Body) + if err != nil { + t.Fatalf("could not read response body: %v", err) + } + + var actual api.Account + if err = json.Unmarshal(content, &actual); err != nil { + t.Fatalf("response is not valid JSON: %v", err) + } + + expectedSettings := api.AccountSettings{ + PeerLoginExpiration: 3600, + PeerLoginExpirationEnabled: false, + GroupsPropagationEnabled: br(false), + 
JwtGroupsClaimName: sr(""), + JwtGroupsEnabled: br(false), + JwtAllowGroups: &[]string{}, + RegularUsersViewBlocked: false, + RoutingPeerDnsResolutionEnabled: br(false), + LazyConnectionEnabled: br(false), + DnsDomain: sr(""), + AutoUpdateAlways: br(false), + AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), + P2pRetryMaxSeconds: ir(600), + } + + assert.Equal(t, expectedSettings, actual.Settings) +} diff --git a/management/server/http/handlers/peer_connections/handler.go b/management/server/http/handlers/peer_connections/handler.go new file mode 100644 index 00000000000..de3a5c64210 --- /dev/null +++ b/management/server/http/handlers/peer_connections/handler.go @@ -0,0 +1,204 @@ +package peer_connections + +import ( + "context" + "encoding/json" + "net/http" + "strconv" + "sync/atomic" + "time" + + "github.com/gorilla/mux" + + nbcontext "github.com/netbirdio/netbird/management/server/context" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/peer_connections" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// AccountManager is the slice of the existing AccountManager interface +// this handler needs. Phase 3.7i of #5989. +type AccountManager interface { + GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) + GetDNSDomain(ctx context.Context, accountID string) string + GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) +} + +// SnapshotRequester triggers a SnapshotRequest on the peer's active +// Sync server-stream. Phase 3.7i of #5989. 
+type SnapshotRequester interface { + Request(peerPubKey string, nonce uint64) bool +} + +type Handler struct { + store peer_connections.Store + account AccountManager + router SnapshotRequester + nonce atomic.Uint64 +} + +func NewHandler(store peer_connections.Store, account AccountManager, router SnapshotRequester) *Handler { + return &Handler{store: store, account: account, router: router} +} + +type apiEntry struct { + RemotePubkey string `json:"remote_pubkey"` + RemoteFQDN string `json:"remote_fqdn,omitempty"` + ConnType string `json:"conn_type"` + LastHandshake string `json:"last_handshake,omitempty"` + LatencyMs uint32 `json:"latency_ms,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RelayServer string `json:"relay_server,omitempty"` + RxBytes uint64 `json:"rx_bytes,omitempty"` + TxBytes uint64 `json:"tx_bytes,omitempty"` +} + +type apiResponse struct { + PeerPubkey string `json:"peer_pubkey"` + Seq uint64 `json:"seq"` + FullSnapshot bool `json:"full_snapshot"` + InResponseTo uint64 `json:"in_response_to_nonce,omitempty"` + Entries []apiEntry `json:"entries"` +} + +type refreshResponse struct { + RefreshToken uint64 `json:"refresh_token"` + CachedMap *apiResponse `json:"cached_map,omitempty"` + // Dispatched is true when the snapshot request was actually delivered + // to an active Sync stream for this peer. False means the peer has + // no live stream (offline / between connections / older daemon + // without snapshot-request support) and the caller can decide whether + // to retry or fall back to the cached map. + Dispatched bool `json:"dispatched"` +} + +// GetPeerConnections handles GET /api/peers/{peerId}/connections. +// 401 missing/invalid auth, 404 peer not found, 200 with body. +// ?since=N blocks up to 5 s for fresh data. 
+func (h *Handler) GetPeerConnections(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + peerID := mux.Vars(r)["peerId"] + peer, err := h.account.GetPeer(r.Context(), userAuth.AccountId, peerID, userAuth.UserId) + if err != nil { + http.Error(w, "peer not found", http.StatusNotFound) + return + } + + pubkey := peer.Key + since, _ := strconv.ParseUint(r.URL.Query().Get("since"), 10, 64) + + var ( + m *mgmProto.PeerConnectionMap + ok bool + ) + if since > 0 { + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + for { + m, ok = h.store.GetWithNonceCheck(pubkey, since) + if ok { + break + } + select { + case <-ctx.Done(): + m, ok = h.store.Get(pubkey) + goto done + case <-ticker.C: + } + } + } else { + m, ok = h.store.Get(pubkey) + } +done: + if !ok { + http.Error(w, "no connection data yet for this peer", http.StatusNotFound) + return + } + + dnsDomain := h.account.GetDNSDomain(r.Context(), userAuth.AccountId) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(h.buildResponse(r.Context(), userAuth.AccountId, dnsDomain, pubkey, m)) +} + +// PostRefresh handles POST /api/peers/{peerId}/connections/refresh. 
+func (h *Handler) PostRefresh(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + peerID := mux.Vars(r)["peerId"] + peer, err := h.account.GetPeer(r.Context(), userAuth.AccountId, peerID, userAuth.UserId) + if err != nil { + http.Error(w, "peer not found", http.StatusNotFound) + return + } + + pubkey := peer.Key + nonce := h.nonce.Add(1) + dispatched := false + if h.router != nil { + dispatched = h.router.Request(pubkey, nonce) + } + + dnsDomain := h.account.GetDNSDomain(r.Context(), userAuth.AccountId) + resp := refreshResponse{RefreshToken: nonce, Dispatched: dispatched} + if cached, ok := h.store.Get(pubkey); ok { + ar := h.buildResponse(r.Context(), userAuth.AccountId, dnsDomain, pubkey, cached) + resp.CachedMap = &ar + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + _ = json.NewEncoder(w).Encode(resp) +} + +func (h *Handler) buildResponse(ctx context.Context, accountID, dnsDomain, pubkey string, m *mgmProto.PeerConnectionMap) apiResponse { + resp := apiResponse{ + PeerPubkey: pubkey, + Seq: m.GetSeq(), + FullSnapshot: m.GetFullSnapshot(), + InResponseTo: m.GetInResponseToNonce(), + Entries: make([]apiEntry, 0, len(m.GetEntries())), + } + for _, e := range m.GetEntries() { + entry := apiEntry{ + RemotePubkey: e.GetRemotePubkey(), + ConnType: connTypeToStr(e.GetConnType()), + LatencyMs: e.GetLatencyMs(), + Endpoint: e.GetEndpoint(), + RelayServer: e.GetRelayServer(), + RxBytes: e.GetRxBytes(), + TxBytes: e.GetTxBytes(), + } + if hs := e.GetLastHandshake(); hs != nil && hs.IsValid() { + entry.LastHandshake = hs.AsTime().Format(time.RFC3339) + } + // Enrich remote_fqdn via account-peer lookup (best-effort). 
+ if rPeer, err := h.account.GetPeerByPubKey(ctx, accountID, e.GetRemotePubkey()); err == nil && rPeer != nil { + entry.RemoteFQDN = rPeer.FQDN(dnsDomain) + } + resp.Entries = append(resp.Entries, entry) + } + return resp +} + +func connTypeToStr(ct mgmProto.ConnType) string { + switch ct { + case mgmProto.ConnType_CONN_TYPE_P2P: + return "p2p" + case mgmProto.ConnType_CONN_TYPE_RELAYED: + return "relayed" + case mgmProto.ConnType_CONN_TYPE_CONNECTING: + return "connecting" + case mgmProto.ConnType_CONN_TYPE_IDLE: + return "idle" + default: + return "unspecified" + } +} diff --git a/management/server/http/handlers/peer_connections/handler_test.go b/management/server/http/handlers/peer_connections/handler_test.go new file mode 100644 index 00000000000..76ca4af6506 --- /dev/null +++ b/management/server/http/handlers/peer_connections/handler_test.go @@ -0,0 +1,137 @@ +package peer_connections + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" + + nbcontext "github.com/netbirdio/netbird/management/server/context" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/peer_connections" + "github.com/netbirdio/netbird/shared/auth" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +type fakeAM struct { + peers map[string]*nbpeer.Peer // peerID → Peer + peersByKey map[string]*nbpeer.Peer // pubkey → Peer + allowedAcc string + dnsDomain string +} + +func (a *fakeAM) GetPeer(_ context.Context, accountID, peerID, _ string) (*nbpeer.Peer, error) { + if a.allowedAcc != "" && a.allowedAcc != accountID { + return nil, errors.New("not found") + } + p, ok := a.peers[peerID] + if !ok { + return nil, errors.New("not found") + } + return p, nil +} + +func (a *fakeAM) GetPeerByPubKey(_ context.Context, _, pubKey string) (*nbpeer.Peer, error) { + p, ok := a.peersByKey[pubKey] + if !ok { + return nil, 
errors.New("not found") + } + return p, nil +} + +func (a *fakeAM) GetDNSDomain(_ context.Context, _ string) string { return a.dnsDomain } + +type fakeRouter struct{ calls int } + +func (f *fakeRouter) Request(_ string, _ uint64) bool { f.calls++; return true } + +func authedReq(method, target, accountID, userID string) *http.Request { + r := httptest.NewRequest(method, target, nil) + return nbcontext.SetUserAuthInRequest(r, auth.UserAuth{AccountId: accountID, UserId: userID}) +} + +func TestHandler_GetPeerConnections_Returns200WithCachedData(t *testing.T) { + store := peer_connections.NewMemoryStore(time.Hour) + store.Put("PUBKEY-A", &mgmProto.PeerConnectionMap{ + Seq: 1, + FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "PUBKEY-B", ConnType: mgmProto.ConnType_CONN_TYPE_P2P, LatencyMs: 12}}, + }) + am := &fakeAM{ + peers: map[string]*nbpeer.Peer{"peerA-id": {ID: "peerA-id", Key: "PUBKEY-A", AccountID: "acc1"}}, + peersByKey: map[string]*nbpeer.Peer{"PUBKEY-B": {ID: "peerB-id", Key: "PUBKEY-B", AccountID: "acc1"}}, + dnsDomain: "test.example", + } + h := NewHandler(store, am, nil) + + r := authedReq("GET", "/api/peers/peerA-id/connections", "acc1", "user1") + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.GetPeerConnections(w, r) + if w.Code != http.StatusOK { + t.Fatalf("want 200, got %d (body %s)", w.Code, w.Body.String()) + } + if !strings.Contains(w.Body.String(), "PUBKEY-B") { + t.Errorf("want PUBKEY-B in body, got %s", w.Body.String()) + } +} + +func TestHandler_GetPeerConnections_401WithoutAuth(t *testing.T) { + h := NewHandler(peer_connections.NewMemoryStore(time.Hour), &fakeAM{}, nil) + r := httptest.NewRequest("GET", "/api/peers/peerA-id/connections", nil) + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.GetPeerConnections(w, r) + if w.Code != http.StatusUnauthorized { + t.Fatalf("want 401, got %d", w.Code) + } +} + +func 
TestHandler_GetPeerConnections_404WhenPeerNotInAccount(t *testing.T) { + store := peer_connections.NewMemoryStore(time.Hour) + am := &fakeAM{ + peers: map[string]*nbpeer.Peer{"peerA-id": {ID: "peerA-id", Key: "PUBKEY-A", AccountID: "acc1"}}, + allowedAcc: "acc1", + } + h := NewHandler(store, am, nil) + // Authed as different account. + r := authedReq("GET", "/api/peers/peerA-id/connections", "acc2", "user1") + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.GetPeerConnections(w, r) + if w.Code != http.StatusNotFound { + t.Fatalf("want 404, got %d", w.Code) + } +} + +func TestHandler_PostRefresh_Returns202WithToken(t *testing.T) { + store := peer_connections.NewMemoryStore(time.Hour) + am := &fakeAM{ + peers: map[string]*nbpeer.Peer{"peerA-id": {ID: "peerA-id", Key: "PUBKEY-A", AccountID: "acc1"}}, + } + router := &fakeRouter{} + h := NewHandler(store, am, router) + r := authedReq("POST", "/api/peers/peerA-id/connections/refresh", "acc1", "user1") + r = mux.SetURLVars(r, map[string]string{"peerId": "peerA-id"}) + w := httptest.NewRecorder() + h.PostRefresh(w, r) + if w.Code != http.StatusAccepted { + t.Fatalf("want 202, got %d", w.Code) + } + var body refreshResponse + if err := json.NewDecoder(w.Body).Decode(&body); err != nil { + t.Fatal(err) + } + if body.RefreshToken == 0 { + t.Error("want non-zero refresh_token") + } + if router.calls != 1 { + t.Errorf("want 1 SnapshotRequester call, got %d", router.calls) + } +} diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 1a8b83c7eed..d3ba49a1c7e 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -135,7 +135,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") 
zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } @@ -264,7 +264,7 @@ func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile strin customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } diff --git a/management/server/management_proto_test.go 
b/management/server/management_proto_test.go index 1b77ea3358f..a8df086c808 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -391,7 +391,7 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config return nil, nil, "", cleanup, err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil, nil, nil, nil) if err != nil { return nil, nil, "", cleanup, err } diff --git a/management/server/management_test.go b/management/server/management_test.go index f1d49193cdd..3eea1b5a594 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -257,6 +257,8 @@ func startServer( networkMapController, nil, nil, + nil, + nil, ) if err != nil { t.Fatalf("failed creating management server: %v", err) diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index ac4d0c6d671..44231a270d9 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -94,6 +94,7 @@ type MockAccountManager struct { GetDNSSettingsFunc func(ctx context.Context, accountID, userID string) (*types.DNSSettings, error) SaveDNSSettingsFunc func(ctx context.Context, accountID, userID string, dnsSettingsToSave *types.DNSSettings) error GetPeerFunc func(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) + GetPeerByPubKeyFunc func(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) UpdateAccountSettingsFunc func(ctx context.Context, accountID, userID string, newSettings *types.Settings) (*types.Settings, error) LoginPeerFunc func(ctx context.Context, login 
types.PeerLogin) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) SyncPeerFunc func(ctx context.Context, sync types.PeerSync, accountID string) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) @@ -820,6 +821,15 @@ func (am *MockAccountManager) GetPeer(ctx context.Context, accountID, peerID, us return nil, status.Errorf(codes.Unimplemented, "method GetPeer is not implemented") } +// GetPeerByPubKey mocks GetPeerByPubKey of the AccountManager interface. +// Phase 3.7i of #5989. +func (am *MockAccountManager) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) { + if am.GetPeerByPubKeyFunc != nil { + return am.GetPeerByPubKeyFunc(ctx, accountID, pubKey) + } + return nil, status.Errorf(codes.Unimplemented, "method GetPeerByPubKey is not implemented") +} + // UpdateAccountSettings mocks UpdateAccountSettings of the AccountManager interface func (am *MockAccountManager) UpdateAccountSettings(ctx context.Context, accountID, userID string, newSettings *types.Settings) (*types.Settings, error) { if am.UpdateAccountSettingsFunc != nil { diff --git a/management/server/peer.go b/management/server/peer.go index 25c6ecd8c57..f79b9ee6cca 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -1222,6 +1222,20 @@ func (am *DefaultAccountManager) GetPeer(ctx context.Context, accountID, peerID, return nil, status.Errorf(status.Internal, "user %s has no access to peer %s under account %s", userID, peer.ID, accountID) } +// GetPeerByPubKey returns the peer with the given WireGuard public key from +// the given account. Phase 3.7i of #5989 — used by REST handlers to enrich +// PeerConnectionMap entries with FQDNs. 
+func (am *DefaultAccountManager) GetPeerByPubKey(ctx context.Context, accountID, pubKey string) (*nbpeer.Peer, error) { + p, err := am.Store.GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, pubKey) + if err != nil { + return nil, err + } + if p.AccountID != accountID { + return nil, fmt.Errorf("peer with pubkey %s not in account %s", pubKey, accountID) + } + return p, nil +} + // UpdateAccountPeers updates all peers that belong to an account. // Should be called when changes have to be synced to peers. func (am *DefaultAccountManager) UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { diff --git a/management/server/peer/peer.go b/management/server/peer/peer.go index db392ddda49..2b2b08c6c59 100644 --- a/management/server/peer/peer.go +++ b/management/server/peer/peer.go @@ -138,6 +138,13 @@ type PeerSystemMeta struct { //nolint:revive Environment Environment `gorm:"serializer:json"` Flags Flags `gorm:"serializer:json"` Files []File `gorm:"serializer:json"` + + // Phase 3.7i (#5989): peer-self-reported runtime mode/timeouts. Stored + // alongside Hostname/Kernel/etc as meta. Empty when peer pre-dates 3.7i. 
+ EffectiveConnectionMode string `json:"effective_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `json:"effective_p2p_retry_max_secs,omitempty"` } func (p PeerSystemMeta) isEqual(other PeerSystemMeta) bool { @@ -182,6 +189,10 @@ func (p PeerSystemMeta) isEqual(other PeerSystemMeta) bool { p.SystemManufacturer == other.SystemManufacturer && p.Environment.Cloud == other.Environment.Cloud && p.Environment.Platform == other.Environment.Platform && + p.EffectiveConnectionMode == other.EffectiveConnectionMode && + p.EffectiveRelayTimeoutSecs == other.EffectiveRelayTimeoutSecs && + p.EffectiveP2PTimeoutSecs == other.EffectiveP2PTimeoutSecs && + p.EffectiveP2PRetryMaxSecs == other.EffectiveP2PRetryMaxSecs && p.Flags.isEqual(other.Flags) } diff --git a/management/server/peer/peer_test.go b/management/server/peer/peer_test.go index 1aa3f6ffcea..187f0648c58 100644 --- a/management/server/peer/peer_test.go +++ b/management/server/peer/peer_test.go @@ -141,3 +141,12 @@ func TestFlags_IsEqual(t *testing.T) { }) } } + +func TestPeerSystemMeta_isEqual_ChecksEffectiveFields(t *testing.T) { + base := PeerSystemMeta{Hostname: "h", EffectiveConnectionMode: "p2p-dynamic"} + other := base + other.EffectiveConnectionMode = "p2p" + if base.isEqual(other) { + t.Error("isEqual should return false when EffectiveConnectionMode differs") + } +} diff --git a/management/server/peer_connections/snapshot_router.go b/management/server/peer_connections/snapshot_router.go new file mode 100644 index 00000000000..2daa2b86d8c --- /dev/null +++ b/management/server/peer_connections/snapshot_router.go @@ -0,0 +1,73 @@ +package peer_connections + +import "sync" + +// SnapshotRouter holds per-peer-pubkey send-channels so REST handlers +// can inject a SnapshotRequest into the active Sync server-stream. 
+// Stream owners (mgmt grpc handleUpdates) Register on stream-start and +// Unregister on stream-close. Phase 3.7i of #5989. +type SnapshotRouter struct { + mu sync.Mutex + channels map[string]chan uint64 +} + +func NewSnapshotRouter() *SnapshotRouter { + return &SnapshotRouter{channels: make(map[string]chan uint64)} +} + +// Register returns a buffered channel the stream owner reads from to +// receive snapshot-request nonces. The returned channel is the token +// the caller must pass to Unregister so a stale stream cannot tear +// down a fresh stream's channel after a quick reconnect. +func (r *SnapshotRouter) Register(peerPubKey string) <-chan uint64 { + r.mu.Lock() + defer r.mu.Unlock() + ch := make(chan uint64, 4) + if old, ok := r.channels[peerPubKey]; ok { + // A second concurrent stream for the same peer (e.g. fast + // reconnect) — close the previous channel so its goroutine + // exits cleanly, then install the new one. + close(old) + } + r.channels[peerPubKey] = ch + return ch +} + +// Unregister closes the given channel (token returned from Register) +// and removes the peer from the router only if that channel is still +// the live one. A stale stream calling Unregister after a fresh stream +// has registered must not tear down the new stream's channel. +// Idempotent. +func (r *SnapshotRouter) Unregister(peerPubKey string, token <-chan uint64) { + r.mu.Lock() + defer r.mu.Unlock() + current, ok := r.channels[peerPubKey] + if !ok { + return + } + if (<-chan uint64)(current) != token { + // A newer Register replaced our channel; that newer Register + // already closed our old channel, so nothing to do here. + return + } + close(current) + delete(r.channels, peerPubKey) +} + +// Request enqueues a nonce for the given peer's snapshot channel. +// Returns true if delivered, false if no active stream for that peer +// or the channel is full (channel capacity 4). 
+func (r *SnapshotRouter) Request(peerPubKey string, nonce uint64) bool { + r.mu.Lock() + defer r.mu.Unlock() + ch, ok := r.channels[peerPubKey] + if !ok { + return false + } + select { + case ch <- nonce: + return true + default: + return false + } +} diff --git a/management/server/peer_connections/snapshot_router_test.go b/management/server/peer_connections/snapshot_router_test.go new file mode 100644 index 00000000000..617e07dbb0c --- /dev/null +++ b/management/server/peer_connections/snapshot_router_test.go @@ -0,0 +1,62 @@ +package peer_connections + +import "testing" + +func TestSnapshotRouter_RegisterAndRequest(t *testing.T) { + r := NewSnapshotRouter() + ch := r.Register("peerA-pubkey") + if !r.Request("peerA-pubkey", 42) { + t.Fatal("Request should return true for registered peer") + } + select { + case n := <-ch: + if n != 42 { + t.Errorf("want nonce 42, got %d", n) + } + default: + t.Fatal("nonce was not delivered to channel") + } +} + +func TestSnapshotRouter_RequestUnregisteredPeer(t *testing.T) { + r := NewSnapshotRouter() + if r.Request("ghost", 1) { + t.Error("Request for unregistered peer should return false") + } +} + +func TestSnapshotRouter_UnregisterClosesChannel(t *testing.T) { + r := NewSnapshotRouter() + ch := r.Register("peerA") + r.Unregister("peerA", ch) + if _, ok := <-ch; ok { + t.Error("channel should be closed after Unregister") + } +} + +func TestSnapshotRouter_StaleUnregisterDoesNotEvictNewStream(t *testing.T) { + r := NewSnapshotRouter() + old := r.Register("peerA") + // Second Register simulates a fast reconnect: it must close the + // previous channel and replace it. + fresh := r.Register("peerA") + if _, ok := <-old; ok { + t.Error("old channel should be closed when a second Register comes in") + } + // Stale stream calling Unregister with the (now-closed) old token + // must not touch the fresh channel. 
+ r.Unregister("peerA", old) + select { + case _, ok := <-fresh: + if !ok { + t.Error("fresh channel must not be closed by stale Unregister") + } + default: + // expected: channel still open and empty + } + // Proper Unregister with the fresh token tears it down. + r.Unregister("peerA", fresh) + if _, ok := <-fresh; ok { + t.Error("fresh channel should be closed after its own Unregister") + } +} diff --git a/management/server/peer_connections/store.go b/management/server/peer_connections/store.go new file mode 100644 index 00000000000..7c264cae929 --- /dev/null +++ b/management/server/peer_connections/store.go @@ -0,0 +1,137 @@ +package peer_connections + +import ( + "sync" + "time" + + "google.golang.org/protobuf/proto" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Clock is the time source MemoryStore consults. Production passes +// realClock{}; tests inject fakeClock to control TTL deterministically. +// Phase 3.7i of #5989. +type Clock interface { + Now() time.Time +} + +type realClock struct{} + +func (realClock) Now() time.Time { return time.Now() } + +// Store is the interface peer-connections-map storage implementations +// must satisfy. Phase 3.7i ships only MemoryStore. RedisStore is a +// future possibility behind the same interface (deferred). +type Store interface { + Put(peerPubKey string, m *mgmProto.PeerConnectionMap) + Get(peerPubKey string) (*mgmProto.PeerConnectionMap, bool) + GetWithNonceCheck(peerPubKey string, sinceNonce uint64) (*mgmProto.PeerConnectionMap, bool) +} + +// MemoryStore is the in-memory Store implementation. Phase 3.7i. +type MemoryStore struct { + ttl time.Duration + clock Clock + mu sync.Mutex + maps map[string]*memEntry +} + +type memEntry struct { + m *mgmProto.PeerConnectionMap + updatedAt time.Time +} + +// NewMemoryStore returns a MemoryStore using wall-clock time. 
+func NewMemoryStore(ttl time.Duration) *MemoryStore { + return newMemoryStoreWithClock(ttl, realClock{}) +} + +// newMemoryStoreWithClock is the test-only ctor that lets tests inject a +// fakeClock for deterministic TTL behaviour. +func newMemoryStoreWithClock(ttl time.Duration, clk Clock) *MemoryStore { + return &MemoryStore{ + ttl: ttl, + clock: clk, + maps: make(map[string]*memEntry), + } +} + +// Put stores or merges a connection-map for peerPubKey. +// - Out-of-order seq -> drop silently. +// - full_snapshot=true OR no prior entry -> replace (deep-copied). +// - full_snapshot=false with prior entry -> delta-merge per remote_pubkey. +func (s *MemoryStore) Put(peerPubKey string, m *mgmProto.PeerConnectionMap) { + s.mu.Lock() + defer s.mu.Unlock() + + prev := s.maps[peerPubKey] + if prev != nil && m.GetSeq() > 0 && m.GetSeq() <= prev.m.GetSeq() { + return + } + + stored := proto.Clone(m).(*mgmProto.PeerConnectionMap) + if !m.GetFullSnapshot() && prev != nil { + merged := proto.Clone(prev.m).(*mgmProto.PeerConnectionMap) + merged.Seq = m.GetSeq() + merged.FullSnapshot = false + // Keep the latest non-zero refresh-nonce. A snapshot pushed in + // response to nonce N must remain reachable via GET ?since=N + // even when a regular delta with InResponseToNonce=0 arrives + // shortly after; otherwise the refresh polling client gives up + // and falls back to the next sync interval (~60 s gap). 
+ if m.GetInResponseToNonce() > merged.GetInResponseToNonce() { + merged.InResponseToNonce = m.GetInResponseToNonce() + } + byKey := make(map[string]int, len(merged.Entries)) + for i, e := range merged.Entries { + byKey[e.GetRemotePubkey()] = i + } + for _, ne := range stored.Entries { + if idx, ok := byKey[ne.GetRemotePubkey()]; ok { + merged.Entries[idx] = ne + } else { + merged.Entries = append(merged.Entries, ne) + byKey[ne.GetRemotePubkey()] = len(merged.Entries) - 1 + } + } + stored = merged + } + s.maps[peerPubKey] = &memEntry{m: stored, updatedAt: s.clock.Now()} +} + +// Get returns a deep copy of the cached map for peerPubKey, or false if +// missing or TTL-expired. +func (s *MemoryStore) Get(peerPubKey string) (*mgmProto.PeerConnectionMap, bool) { + s.mu.Lock() + defer s.mu.Unlock() + e, ok := s.maps[peerPubKey] + if !ok { + return nil, false + } + if s.clock.Now().Sub(e.updatedAt) > s.ttl { + delete(s.maps, peerPubKey) + return nil, false + } + return proto.Clone(e.m).(*mgmProto.PeerConnectionMap), true +} + +// GetWithNonceCheck returns the cached map only if its +// InResponseToNonce >= sinceNonce (refresh-flow polling). Same TTL + +// deep-copy semantics as Get. 
+func (s *MemoryStore) GetWithNonceCheck(peerPubKey string, since uint64) (*mgmProto.PeerConnectionMap, bool) { + s.mu.Lock() + defer s.mu.Unlock() + e, ok := s.maps[peerPubKey] + if !ok { + return nil, false + } + if since > 0 && e.m.GetInResponseToNonce() < since { + return nil, false + } + if s.clock.Now().Sub(e.updatedAt) > s.ttl { + delete(s.maps, peerPubKey) + return nil, false + } + return proto.Clone(e.m).(*mgmProto.PeerConnectionMap), true +} diff --git a/management/server/peer_connections/store_test.go b/management/server/peer_connections/store_test.go new file mode 100644 index 00000000000..a4ab5b387e7 --- /dev/null +++ b/management/server/peer_connections/store_test.go @@ -0,0 +1,103 @@ +package peer_connections + +import ( + "testing" + "time" + + "google.golang.org/protobuf/types/known/timestamppb" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +type fakeClock struct{ now time.Time } + +func (c *fakeClock) Now() time.Time { return c.now } +func (c *fakeClock) advance(d time.Duration) { c.now = c.now.Add(d) } + +func newStoreWithClock(ttl time.Duration) (*MemoryStore, *fakeClock) { + clk := &fakeClock{now: time.Now()} + s := newMemoryStoreWithClock(ttl, clk) + return s, clk +} + +func TestMemoryStore_PutFullThenGet(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 1, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 10}}, + }) + got, ok := s.Get("peerA") + if !ok { + t.Fatal("expected entry") + } + if len(got.GetEntries()) != 1 || got.GetEntries()[0].GetRemotePubkey() != "peerB" { + t.Errorf("unexpected entries: %+v", got.GetEntries()) + } +} + +func TestMemoryStore_DeepCopyOnReturn(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{ + Seq: 1, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "peerB", LastHandshake: 
timestamppb.New(time.Now())}, + }, + }) + got1, _ := s.Get("peerA") + got1.GetEntries()[0].RemotePubkey = "MUTATED" + got2, _ := s.Get("peerA") + if got2.GetEntries()[0].GetRemotePubkey() != "peerB" { + t.Errorf("Get returned shared pointer; mutation leaked: %s", got2.GetEntries()[0].GetRemotePubkey()) + } +} + +func TestMemoryStore_DeltaMerges(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 1, FullSnapshot: true, + Entries: []*mgmProto.PeerConnectionEntry{ + {RemotePubkey: "peerB", LatencyMs: 10}, + {RemotePubkey: "peerC", LatencyMs: 30}, + }}) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 2, FullSnapshot: false, + Entries: []*mgmProto.PeerConnectionEntry{{RemotePubkey: "peerB", LatencyMs: 14}}}) + got, _ := s.Get("peerA") + if len(got.GetEntries()) != 2 { + t.Fatalf("want 2 entries, got %d", len(got.GetEntries())) + } + for _, e := range got.GetEntries() { + if e.GetRemotePubkey() == "peerB" && e.GetLatencyMs() != 14 { + t.Errorf("peerB latency not updated: %d", e.GetLatencyMs()) + } + } +} + +func TestMemoryStore_OutOfOrderDropped(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 5, FullSnapshot: true}) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 3, FullSnapshot: true}) + got, _ := s.Get("peerA") + if got.GetSeq() != 5 { + t.Errorf("want seq 5, got %d", got.GetSeq()) + } +} + +func TestMemoryStore_TTLExpires(t *testing.T) { + s, clk := newStoreWithClock(50 * time.Millisecond) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 1, FullSnapshot: true}) + clk.advance(60 * time.Millisecond) + if _, ok := s.Get("peerA"); ok { + t.Error("expected TTL-expired entry to be gone") + } +} + +func TestMemoryStore_NonceCheck(t *testing.T) { + s, _ := newStoreWithClock(time.Hour) + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 1, FullSnapshot: true, InResponseToNonce: 0}) + if _, ok := s.GetWithNonceCheck("peerA", 5); ok { + t.Error("expected 
GetWithNonceCheck to refuse stale data when sinceNonce > InResponseToNonce") + } + s.Put("peerA", &mgmProto.PeerConnectionMap{Seq: 2, FullSnapshot: true, InResponseToNonce: 5}) + if _, ok := s.GetWithNonceCheck("peerA", 5); !ok { + t.Error("expected GetWithNonceCheck to return when InResponseToNonce >= sinceNonce") + } +} diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 36809d354f2..e5ddc123e9e 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -1156,7 +1156,7 @@ func TestToSyncResponse(t *testing.T) { } dnsCache := &cache.DNSConfigCache{} accountSettings := &types.Settings{RoutingPeerDNSResolutionEnabled: true} - response := grpc.ToSyncResponse(context.Background(), config, config.HttpConfig, config.DeviceAuthorizationFlow, peer, turnRelayToken, turnRelayToken, networkMap, dnsName, checks, dnsCache, accountSettings, nil, []string{}, int64(dnsForwarderPort)) + response := grpc.ToSyncResponse(context.Background(), config, config.HttpConfig, config.DeviceAuthorizationFlow, peer, turnRelayToken, turnRelayToken, networkMap, dnsName, checks, dnsCache, accountSettings, nil, []string{}, int64(dnsForwarderPort), nil) assert.NotNil(t, response) // assert peer config diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 1fa3d08ee5e..f88ca4d9380 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -1513,6 +1513,14 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc settings_jwt_groups_enabled, settings_jwt_groups_claim_name, settings_jwt_allow_groups, settings_routing_peer_dns_resolution_enabled, settings_dns_domain, settings_network_range, settings_lazy_connection_enabled, + -- Phase-3.7i (#5989) connection-mode columns. 
The pgx fast + -- path must SELECT these or new modes silently regress to the + -- legacy LazyConnectionEnabled bool, which clients then + -- interpret as ModeP2P (eager) -- defeating the picker. + settings_connection_mode, + settings_relay_timeout_seconds, + settings_p2p_timeout_seconds, + settings_p2p_retry_max_seconds, -- Embedded ExtraSettings settings_extra_peer_approval_enabled, settings_extra_user_approval_required, settings_extra_integrated_validator, settings_extra_integrated_validator_groups @@ -1532,6 +1540,10 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc sDNSDomain sql.NullString sNetworkRange sql.NullString sLazyConnectionEnabled sql.NullBool + sConnectionMode sql.NullString + sRelayTimeoutSeconds sql.NullInt64 + sP2pTimeoutSeconds sql.NullInt64 + sP2pRetryMaxSeconds sql.NullInt64 sExtraPeerApprovalEnabled sql.NullBool sExtraUserApprovalRequired sql.NullBool sExtraIntegratedValidator sql.NullString @@ -1553,6 +1565,7 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc &sJWTGroupsEnabled, &sJWTGroupsClaimName, &sJWTAllowGroups, &sRoutingPeerDNSResolutionEnabled, &sDNSDomain, &sNetworkRange, &sLazyConnectionEnabled, + &sConnectionMode, &sRelayTimeoutSeconds, &sP2pTimeoutSeconds, &sP2pRetryMaxSeconds, &sExtraPeerApprovalEnabled, &sExtraUserApprovalRequired, &sExtraIntegratedValidator, &sExtraIntegratedValidatorGroups, ) @@ -1615,6 +1628,22 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc if sLazyConnectionEnabled.Valid { account.Settings.LazyConnectionEnabled = sLazyConnectionEnabled.Bool } + if sConnectionMode.Valid { + v := sConnectionMode.String + account.Settings.ConnectionMode = &v + } + if sRelayTimeoutSeconds.Valid { + v := uint32(sRelayTimeoutSeconds.Int64) + account.Settings.RelayTimeoutSeconds = &v + } + if sP2pTimeoutSeconds.Valid { + v := uint32(sP2pTimeoutSeconds.Int64) + account.Settings.P2pTimeoutSeconds = &v + } + if 
sP2pRetryMaxSeconds.Valid { + v := uint32(sP2pRetryMaxSeconds.Int64) + account.Settings.P2pRetryMaxSeconds = &v + } if sJWTAllowGroups.Valid { _ = json.Unmarshal([]byte(sJWTAllowGroups.String), &account.Settings.JWTAllowGroups) } diff --git a/management/server/types/settings.go b/management/server/types/settings.go index 4ea79ec72fc..19e5085c1b8 100644 --- a/management/server/types/settings.go +++ b/management/server/types/settings.go @@ -58,6 +58,27 @@ type Settings struct { // LazyConnectionEnabled indicates if the experimental feature is enabled or disabled LazyConnectionEnabled bool `gorm:"default:false"` + // ConnectionMode is the account-wide default connection mode (Phase 1 + // of issue #5989). Nullable: NULL means "fall back to LazyConnectionEnabled". + // Stored as the canonical lower-kebab-case string (e.g. "p2p-lazy"). + ConnectionMode *string `gorm:"type:varchar(32);default:null"` + + // RelayTimeoutSeconds, when non-NULL, overrides the built-in default + // (5 min). 0 = "never tear down". Nullable to distinguish "use default" + // from "explicit 0". + RelayTimeoutSeconds *uint32 `gorm:"default:null"` + + // P2pTimeoutSeconds is reserved for Phase 2; same nullable semantics. + // Built-in default in Phase 1: 180 min, but not yet effective. + P2pTimeoutSeconds *uint32 `gorm:"default:null"` + + // P2pRetryMaxSeconds is reserved for Phase 3 (#5989). Caps the ICE- + // failure backoff sequence in p2p-dynamic mode. NULL = use daemon's + // built-in default (900s = 15 min). 0 = disable backoff (treated + // internally as "user-explicit-disable" via uint32-max sentinel on + // the wire). 
+ P2pRetryMaxSeconds *uint32 `gorm:"default:null"` + // AutoUpdateVersion client auto-update version AutoUpdateVersion string `gorm:"default:'disabled'"` @@ -92,6 +113,10 @@ func (s *Settings) Copy() *Settings { PeerExposeEnabled: s.PeerExposeEnabled, PeerExposeGroups: slices.Clone(s.PeerExposeGroups), LazyConnectionEnabled: s.LazyConnectionEnabled, + ConnectionMode: cloneStringPtr(s.ConnectionMode), + RelayTimeoutSeconds: cloneUint32Ptr(s.RelayTimeoutSeconds), + P2pTimeoutSeconds: cloneUint32Ptr(s.P2pTimeoutSeconds), + P2pRetryMaxSeconds: cloneUint32Ptr(s.P2pRetryMaxSeconds), DNSDomain: s.DNSDomain, NetworkRange: s.NetworkRange, AutoUpdateVersion: s.AutoUpdateVersion, @@ -138,3 +163,23 @@ func (e *ExtraSettings) Copy() *ExtraSettings { FlowDnsCollectionEnabled: e.FlowDnsCollectionEnabled, } } + +// cloneStringPtr returns a deep copy of a *string (nil-safe). Used by +// Settings.Copy for the new nullable ConnectionMode field. +func cloneStringPtr(p *string) *string { + if p == nil { + return nil + } + v := *p + return &v +} + +// cloneUint32Ptr returns a deep copy of a *uint32 (nil-safe). Used by +// Settings.Copy for the new nullable timeout fields. 
+func cloneUint32Ptr(p *uint32) *uint32 { + if p == nil { + return nil + } + v := *p + return &v +} diff --git a/management/server/types/settings_test.go b/management/server/types/settings_test.go new file mode 100644 index 00000000000..b6a42f6c6ba --- /dev/null +++ b/management/server/types/settings_test.go @@ -0,0 +1,20 @@ +package types + +import "testing" + +func TestSettings_Copy_P2pRetryMaxSeconds(t *testing.T) { + v := uint32(900) + src := &Settings{P2pRetryMaxSeconds: &v} + dst := src.Copy() + if dst.P2pRetryMaxSeconds == nil { + t.Fatal("Copy lost P2pRetryMaxSeconds pointer") + } + if *dst.P2pRetryMaxSeconds != 900 { + t.Fatalf("expected 900, got %d", *dst.P2pRetryMaxSeconds) + } + // Verify it's a deep copy (different pointers) + *dst.P2pRetryMaxSeconds = 600 + if *src.P2pRetryMaxSeconds != 900 { + t.Fatal("Copy did not deep-clone P2pRetryMaxSeconds") + } +} diff --git a/shared/connectionmode/mode.go b/shared/connectionmode/mode.go new file mode 100644 index 00000000000..d3b1c9e14e4 --- /dev/null +++ b/shared/connectionmode/mode.go @@ -0,0 +1,128 @@ +// Package connectionmode defines the Mode type used to control how a peer +// establishes connections to other peers. Introduced in Phase 1 of the +// connection-mode consolidation (issue #5989) to replace the historical +// pair (NB_FORCE_RELAY, NB_ENABLE_EXPERIMENTAL_LAZY_CONN). +package connectionmode + +import ( + "fmt" + "strings" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Mode is a connection mode for peer-to-peer (or relay-only) connections. +// ModeUnspecified is the zero value and indicates "fall back to the next +// resolution source" (env -> config -> server-pushed -> legacy bool). 
+type Mode int + +const ( + ModeUnspecified Mode = iota + ModeRelayForced + ModeP2P + ModeP2PLazy + ModeP2PDynamic + // ModeFollowServer is a client-side sentinel: setting this in the + // client config explicitly clears any local override so the + // server-pushed value (or its legacy fallback) is used. It MUST NOT + // be sent on the wire -- ToProto returns UNSPECIFIED for it. + ModeFollowServer +) + +// String returns the canonical lower-kebab-case name of the mode. +func (m Mode) String() string { + switch m { + case ModeRelayForced: + return "relay-forced" + case ModeP2P: + return "p2p" + case ModeP2PLazy: + return "p2p-lazy" + case ModeP2PDynamic: + return "p2p-dynamic" + case ModeFollowServer: + return "follow-server" + default: + return "" + } +} + +// ParseString accepts the canonical name (case-insensitive, surrounding +// whitespace tolerated) and returns the corresponding Mode. Empty input +// returns ModeUnspecified with no error. Unknown input returns +// ModeUnspecified with an error. +func ParseString(s string) (Mode, error) { + switch strings.ToLower(strings.TrimSpace(s)) { + case "": + return ModeUnspecified, nil + case "relay-forced": + return ModeRelayForced, nil + case "p2p": + return ModeP2P, nil + case "p2p-lazy": + return ModeP2PLazy, nil + case "p2p-dynamic": + return ModeP2PDynamic, nil + case "follow-server": + return ModeFollowServer, nil + default: + return ModeUnspecified, fmt.Errorf("unknown connection mode %q", s) + } +} + +// FromProto translates a proto enum value to the internal Mode. 
+func FromProto(m mgmProto.ConnectionMode) Mode { + switch m { + case mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED: + return ModeRelayForced + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P: + return ModeP2P + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY: + return ModeP2PLazy + case mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC: + return ModeP2PDynamic + default: + return ModeUnspecified + } +} + +// ToProto translates the internal Mode to a proto enum value. +// ModeFollowServer is a client-side concept and intentionally maps to +// UNSPECIFIED so it never appears on the wire. +func (m Mode) ToProto() mgmProto.ConnectionMode { + switch m { + case ModeRelayForced: + return mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED + case ModeP2P: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P + case ModeP2PLazy: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY + case ModeP2PDynamic: + return mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC + default: + return mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED + } +} + +// ResolveLegacyLazyBool maps the historical Settings.LazyConnectionEnabled +// boolean to the new Mode. Used when a new client receives an old server's +// PeerConfig (ConnectionMode = UNSPECIFIED) or when the management server +// has no explicit Settings.ConnectionMode set yet. +func ResolveLegacyLazyBool(lazy bool) Mode { + if lazy { + return ModeP2PLazy + } + return ModeP2P +} + +// ToLazyConnectionEnabled is the inverse mapping for backwards-compat. +// Used by toPeerConfig() so old clients (which only know the boolean) +// still get a sensible behaviour. +// +// Note: ModeRelayForced cannot be expressed via the legacy boolean and +// falls back to false. This is a structural compat gap documented in the +// release notes; admins must set NB_FORCE_RELAY=true on old clients +// or upgrade them. 
+func (m Mode) ToLazyConnectionEnabled() bool { + return m == ModeP2PLazy +} diff --git a/shared/connectionmode/mode_test.go b/shared/connectionmode/mode_test.go new file mode 100644 index 00000000000..01a9c11c929 --- /dev/null +++ b/shared/connectionmode/mode_test.go @@ -0,0 +1,106 @@ +package connectionmode + +import ( + "testing" + + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestParseString(t *testing.T) { + cases := []struct { + input string + want Mode + wantErr bool + }{ + {"relay-forced", ModeRelayForced, false}, + {"p2p", ModeP2P, false}, + {"p2p-lazy", ModeP2PLazy, false}, + {"p2p-dynamic", ModeP2PDynamic, false}, + {"follow-server", ModeFollowServer, false}, + {"", ModeUnspecified, false}, + {"P2P", ModeP2P, false}, + {" p2p-lazy ", ModeP2PLazy, false}, + {"junk", ModeUnspecified, true}, + } + for _, c := range cases { + got, err := ParseString(c.input) + if (err != nil) != c.wantErr { + t.Errorf("ParseString(%q): err=%v wantErr=%v", c.input, err, c.wantErr) + continue + } + if got != c.want { + t.Errorf("ParseString(%q) = %v, want %v", c.input, got, c.want) + } + } +} + +func TestFromProto(t *testing.T) { + cases := []struct { + input mgmProto.ConnectionMode + want Mode + }{ + {mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED, ModeUnspecified}, + {mgmProto.ConnectionMode_CONNECTION_MODE_RELAY_FORCED, ModeRelayForced}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P, ModeP2P}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P_LAZY, ModeP2PLazy}, + {mgmProto.ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC, ModeP2PDynamic}, + } + for _, c := range cases { + got := FromProto(c.input) + if got != c.want { + t.Errorf("FromProto(%v) = %v, want %v", c.input, got, c.want) + } + } +} + +func TestToProto(t *testing.T) { + for _, m := range []Mode{ModeUnspecified, ModeRelayForced, ModeP2P, ModeP2PLazy, ModeP2PDynamic} { + got := FromProto(m.ToProto()) + if got != m { + t.Errorf("round-trip Mode %v -> proto -> Mode = %v", m, got) + } + } + 
if got := ModeFollowServer.ToProto(); got != mgmProto.ConnectionMode_CONNECTION_MODE_UNSPECIFIED { + t.Errorf("ModeFollowServer.ToProto() = %v, want UNSPECIFIED", got) + } +} + +func TestResolveLegacyLazyBool(t *testing.T) { + if got := ResolveLegacyLazyBool(true); got != ModeP2PLazy { + t.Errorf("ResolveLegacyLazyBool(true) = %v, want ModeP2PLazy", got) + } + if got := ResolveLegacyLazyBool(false); got != ModeP2P { + t.Errorf("ResolveLegacyLazyBool(false) = %v, want ModeP2P", got) + } +} + +func TestToLazyConnectionEnabled(t *testing.T) { + cases := []struct { + mode Mode + want bool + }{ + {ModeRelayForced, false}, + {ModeP2P, false}, + {ModeP2PLazy, true}, + {ModeP2PDynamic, false}, + {ModeUnspecified, false}, + } + for _, c := range cases { + got := c.mode.ToLazyConnectionEnabled() + if got != c.want { + t.Errorf("Mode %v ToLazyConnectionEnabled() = %v, want %v", c.mode, got, c.want) + } + } +} + +func TestStringRoundTrip(t *testing.T) { + for _, m := range []Mode{ModeRelayForced, ModeP2P, ModeP2PLazy, ModeP2PDynamic, ModeFollowServer} { + got, err := ParseString(m.String()) + if err != nil { + t.Errorf("round-trip parse of %v.String() failed: %v", m, err) + } + if got != m { + t.Errorf("round-trip %v -> %q -> %v", m, m.String(), got) + } + } +} diff --git a/shared/management/client/client.go b/shared/management/client/client.go index 18efba87b87..e9354d1e521 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -27,6 +27,17 @@ type Client interface { // Used to validate connectivity before committing configuration changes. HealthCheck() error SyncMeta(sysInfo *system.Info) error + // SyncPeerConnections sends the peer's current per-peer connection map + // to the management server as a unary RPC. Phase 3.7i of #5989. 
+ SyncPeerConnections(ctx context.Context, m *proto.PeerConnectionMap) error + // SetEffectiveConnConfig records the engine-resolved connection mode/ + // timeouts to report in subsequent Sync/Login/SyncMeta PeerSystemMeta. + // Phase 3.7i of #5989. + SetEffectiveConnConfig(eff EffectiveConnConfig) + // SetSnapshotRequestHandler registers a callback invoked when the + // management server sends a SnapshotRequest over the Sync server-stream. + // Phase 3.7i of #5989. + SetSnapshotRequestHandler(fn func(nonce uint64)) Logout() error CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) RenewExpose(ctx context.Context, domain string) error diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index a8e8172dc88..05b684eaf31 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -17,6 +17,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/netbirdio/netbird/management/server/peer_connections" "github.com/netbirdio/management-integrations/integrations" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" @@ -138,7 +139,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil, nil, peer_connections.NewMemoryStore(time.Hour), peer_connections.NewSnapshotRouter()) if err != nil { t.Fatal(err) } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 80625fe06c3..396314e4515 100644 --- a/shared/management/client/grpc.go +++ 
b/shared/management/client/grpc.go @@ -55,6 +55,13 @@ type GrpcClient struct { connStateCallback ConnStateNotifier connStateCallbackLock sync.RWMutex serverURL string + + effMu sync.RWMutex + effective EffectiveConnConfig + + // Phase 3.7i (#5989): handler for server-pushed SnapshotRequests. + snapMu sync.Mutex + onSnapshotRequest func(nonce uint64) } type ExposeRequest struct { @@ -435,7 +442,7 @@ func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, err } func (c *GrpcClient) connectToSyncStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info) (proto.ManagementService_SyncClient, error) { - req := &proto.SyncRequest{Meta: infoToMetaData(sysInfo)} + req := &proto.SyncRequest{Meta: infoToMetaData(sysInfo, c.effectiveConnConfig())} myPrivateKey := c.key myPublicKey := myPrivateKey.PublicKey() @@ -473,6 +480,15 @@ func (c *GrpcClient) receiveUpdatesEvents(stream proto.ManagementService_SyncCli return err } + if req := decryptedResp.GetSnapshotRequest(); req != nil { + c.snapMu.Lock() + cb := c.onSnapshotRequest + c.snapMu.Unlock() + if cb != nil { + cb(req.GetNonce()) + } + } + if err := msgHandler(decryptedResp); err != nil { log.Errorf("failed handling an update message received from Management Service: %v", err.Error()) } @@ -595,7 +611,7 @@ func (c *GrpcClient) Register(setupKey string, jwtToken string, sysInfo *system. SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(&proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo, c.effectiveConnConfig()), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // Login attempts login to Management Server. Takes care of encrypting and decrypting messages. 
@@ -604,7 +620,7 @@ func (c *GrpcClient) Login(sysInfo *system.Info, pubSSHKey []byte, dnsLabels dom SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(&proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{Meta: infoToMetaData(sysInfo, c.effectiveConnConfig()), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // GetDeviceAuthorizationFlow returns a device authorization flow information. @@ -700,7 +716,7 @@ func (c *GrpcClient) SyncMeta(sysInfo *system.Info) error { return err } - syncMetaReq, err := encryption.EncryptMessage(*serverPubKey, c.key, &proto.SyncMetaRequest{Meta: infoToMetaData(sysInfo)}) + syncMetaReq, err := encryption.EncryptMessage(*serverPubKey, c.key, &proto.SyncMetaRequest{Meta: infoToMetaData(sysInfo, c.effectiveConnConfig())}) if err != nil { log.Errorf("failed to encrypt message: %s", err) return err @@ -716,6 +732,36 @@ func (c *GrpcClient) SyncMeta(sysInfo *system.Info) error { return err } +// SyncPeerConnections is the GrpcClient implementation. Phase 3.7i of +// #5989. Mirrors SyncMeta: fetches server pubkey, encrypts the +// PeerConnectionMap with the peer's wg key, calls the new unary RPC. 
+func (c *GrpcClient) SyncPeerConnections(ctx context.Context, m *proto.PeerConnectionMap) error { + if !c.ready() { + return errors.New(errMsgNoMgmtConnection) + } + + serverPubKey, err := c.getServerPublicKey() + if err != nil { + log.Debugf(errMsgMgmtPublicKey, err) + return err + } + + encrypted, err := encryption.EncryptMessage(*serverPubKey, c.key, m) + if err != nil { + log.Errorf("encrypt PeerConnectionMap: %s", err) + return err + } + + mgmCtx, cancel := context.WithTimeout(ctx, ConnectTimeout) + defer cancel() + + _, err = c.realClient.SyncPeerConnections(mgmCtx, &proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encrypted, + }) + return err +} + func (c *GrpcClient) notifyDisconnected(err error) { c.connStateCallbackLock.RLock() defer c.connStateCallbackLock.RUnlock() @@ -883,7 +929,43 @@ func toProtoExposeServiceRequest(req ExposeRequest) (*proto.ExposeServiceRequest }, nil } -func infoToMetaData(info *system.Info) *proto.PeerSystemMeta { +// EffectiveConnConfig captures the peer-engine-resolved connection mode +// + timeouts that should be reported to mgmt alongside system info. +// Phase 3.7i of #5989. +type EffectiveConnConfig struct { + Mode string + RelayTimeoutSecs uint32 + P2PTimeoutSecs uint32 + P2PRetryMaxSecs uint32 +} + +// effectiveConnConfig pulls the engine-resolved connection mode/timeouts +// to report in PeerSystemMeta. Empty when the engine has not registered +// itself with the client (early startup / standalone mock). Phase 3.7i. +func (c *GrpcClient) effectiveConnConfig() EffectiveConnConfig { + c.effMu.RLock() + defer c.effMu.RUnlock() + return c.effective +} + +// SetEffectiveConnConfig is called by the engine each time the resolved +// mode changes (typically once per NetworkMap update). Phase 3.7i. 
+func (c *GrpcClient) SetEffectiveConnConfig(eff EffectiveConnConfig) { + c.effMu.Lock() + defer c.effMu.Unlock() + c.effective = eff +} + +// SetSnapshotRequestHandler registers a callback invoked when the +// management server sends a SnapshotRequest over the Sync server-stream. +// Phase 3.7i of #5989. +func (c *GrpcClient) SetSnapshotRequestHandler(fn func(nonce uint64)) { + c.snapMu.Lock() + c.onSnapshotRequest = fn + c.snapMu.Unlock() +} + +func infoToMetaData(info *system.Info, eff EffectiveConnConfig) *proto.PeerSystemMeta { if info == nil { return nil } @@ -940,5 +1022,10 @@ func infoToMetaData(info *system.Info) *proto.PeerSystemMeta { LazyConnectionEnabled: info.LazyConnectionEnabled, }, + + EffectiveConnectionMode: eff.Mode, + EffectiveRelayTimeoutSecs: eff.RelayTimeoutSecs, + EffectiveP2PTimeoutSecs: eff.P2PTimeoutSecs, + EffectiveP2PRetryMaxSecs: eff.P2PRetryMaxSecs, } } diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index 361e8ffadfc..5f6fa8dca81 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -19,7 +19,10 @@ type MockClient struct { GetServerURLFunc func() string HealthCheckFunc func() error SyncMetaFunc func(sysInfo *system.Info) error - LogoutFunc func() error + SyncPeerConnectionsFunc func(ctx context.Context, m *proto.PeerConnectionMap) error + SetEffectiveConnConfigFunc func(eff EffectiveConnConfig) + SetSnapshotRequestHandlerFunc func(fn func(nonce uint64)) + LogoutFunc func() error JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error CreateExposeFunc func(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) RenewExposeFunc func(ctx context.Context, domain string) error @@ -106,6 +109,25 @@ func (m *MockClient) SyncMeta(sysInfo *system.Info) error { return m.SyncMetaFunc(sysInfo) } +func (m *MockClient) SyncPeerConnections(ctx context.Context, pcm *proto.PeerConnectionMap) error { + if m.SyncPeerConnectionsFunc != 
nil { + return m.SyncPeerConnectionsFunc(ctx, pcm) + } + return nil +} + +func (m *MockClient) SetEffectiveConnConfig(eff EffectiveConnConfig) { + if m.SetEffectiveConnConfigFunc != nil { + m.SetEffectiveConnConfigFunc(eff) + } +} + +func (m *MockClient) SetSnapshotRequestHandler(fn func(nonce uint64)) { + if m.SetSnapshotRequestHandlerFunc != nil { + m.SetSnapshotRequestHandlerFunc(fn) + } +} + func (m *MockClient) Logout() error { if m.LogoutFunc == nil { return nil diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 327e2061425..c0ea938ea5f 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -359,6 +359,50 @@ components: description: Enables or disables experimental lazy connection type: boolean example: true + connection_mode: + x-experimental: true + type: string + enum: [relay-forced, p2p, p2p-lazy, p2p-dynamic] + nullable: true + description: | + Account-wide default peer-connection mode. NULL means + "fall back to lazy_connection_enabled" for backwards compatibility. + Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are + functional. p2p-dynamic is reserved (passes through as p2p in + Phase 1; will become functional in Phase 2). + p2p_timeout_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Default ICE-worker idle timeout in seconds. 0 = never tear down. + Effective only in p2p-dynamic mode (added in Phase 2). + NULL means "use built-in default" (180 minutes). + p2p_retry_max_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Maximum interval between P2P retry attempts after consecutive + ICE failures, in seconds. Default 900 (= 15 min). Set to 0 to + disable backoff (always retry immediately, Phase-2 behavior). + Effective only in p2p-dynamic mode (added in Phase 3). 
+ example: 900 + relay_timeout_seconds: + x-experimental: true + type: integer + format: int64 + minimum: 0 + nullable: true + description: | + Default relay-worker idle timeout in seconds. 0 = never tear + down. Effective in p2p-lazy and p2p-dynamic modes. Backwards- + compat alias for NB_LAZY_CONN_INACTIVITY_THRESHOLD on the + client. NULL means "use built-in default" (5 minutes). auto_update_version: description: Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") type: string diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index dc916f81ac9..83d1ffef827 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1,6 +1,6 @@ // Package api provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.6.0 DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.7.0 DO NOT EDIT. package api import ( @@ -13,8 +13,8 @@ import ( ) const ( - BearerAuthScopes = "BearerAuth.Scopes" - TokenAuthScopes = "TokenAuth.Scopes" + BearerAuthScopes bearerAuthContextKey = "BearerAuth.Scopes" + TokenAuthScopes tokenAuthContextKey = "TokenAuth.Scopes" ) // Defines values for AccessRestrictionsCrowdsecMode. @@ -38,6 +38,30 @@ func (e AccessRestrictionsCrowdsecMode) Valid() bool { } } +// Defines values for AccountSettingsConnectionMode. +const ( + AccountSettingsConnectionModeP2p AccountSettingsConnectionMode = "p2p" + AccountSettingsConnectionModeP2pDynamic AccountSettingsConnectionMode = "p2p-dynamic" + AccountSettingsConnectionModeP2pLazy AccountSettingsConnectionMode = "p2p-lazy" + AccountSettingsConnectionModeRelayForced AccountSettingsConnectionMode = "relay-forced" +) + +// Valid indicates whether the value is a known member of the AccountSettingsConnectionMode enum. 
+func (e AccountSettingsConnectionMode) Valid() bool { + switch e { + case AccountSettingsConnectionModeP2p: + return true + case AccountSettingsConnectionModeP2pDynamic: + return true + case AccountSettingsConnectionModeP2pLazy: + return true + case AccountSettingsConnectionModeRelayForced: + return true + default: + return false + } +} + // Defines values for CreateAzureIntegrationRequestHost. const ( CreateAzureIntegrationRequestHostMicrosoftCom CreateAzureIntegrationRequestHost = "microsoft.com" @@ -511,6 +535,7 @@ func (e GroupMinimumIssued) Valid() bool { // Defines values for IdentityProviderType. const ( + IdentityProviderTypeAdfs IdentityProviderType = "adfs" IdentityProviderTypeEntra IdentityProviderType = "entra" IdentityProviderTypeGoogle IdentityProviderType = "google" IdentityProviderTypeMicrosoft IdentityProviderType = "microsoft" @@ -518,12 +543,13 @@ const ( IdentityProviderTypeOkta IdentityProviderType = "okta" IdentityProviderTypePocketid IdentityProviderType = "pocketid" IdentityProviderTypeZitadel IdentityProviderType = "zitadel" - IdentityProviderTypeAdfs IdentityProviderType = "adfs" ) // Valid indicates whether the value is a known member of the IdentityProviderType enum. func (e IdentityProviderType) Valid() bool { switch e { + case IdentityProviderTypeAdfs: + return true case IdentityProviderTypeEntra: return true case IdentityProviderTypeGoogle: @@ -538,8 +564,6 @@ func (e IdentityProviderType) Valid() bool { return true case IdentityProviderTypeZitadel: return true - case IdentityProviderTypeAdfs: - return true default: return false } @@ -1455,6 +1479,13 @@ type AccountSettings struct { // AutoUpdateVersion Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") AutoUpdateVersion *string `json:"auto_update_version,omitempty"` + // ConnectionMode Account-wide default peer-connection mode. NULL means + // "fall back to lazy_connection_enabled" for backwards compatibility. 
+ // Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are + // functional. p2p-dynamic is reserved (passes through as p2p in + // Phase 1; will become functional in Phase 2). + ConnectionMode *AccountSettingsConnectionMode `json:"connection_mode,omitempty"` + // DnsDomain Allows to define a custom dns domain for the account DnsDomain *string `json:"dns_domain,omitempty"` @@ -1483,6 +1514,17 @@ type AccountSettings struct { // NetworkRange Allows to define a custom network range for the account in CIDR format NetworkRange *string `json:"network_range,omitempty"` + // P2pRetryMaxSeconds Maximum interval between P2P retry attempts after consecutive + // ICE failures, in seconds. Default 900 (= 15 min). Set to 0 to + // disable backoff (always retry immediately, Phase-2 behavior). + // Effective only in p2p-dynamic mode (added in Phase 3). + P2pRetryMaxSeconds *int64 `json:"p2p_retry_max_seconds,omitempty"` + + // P2pTimeoutSeconds Default ICE-worker idle timeout in seconds. 0 = never tear down. + // Effective only in p2p-dynamic mode (added in Phase 2). + // NULL means "use built-in default" (180 minutes). + P2pTimeoutSeconds *int64 `json:"p2p_timeout_seconds,omitempty"` + // PeerExposeEnabled Enables or disables peer expose. If enabled, peers can expose local services through the reverse proxy using the CLI. PeerExposeEnabled bool `json:"peer_expose_enabled"` @@ -1504,10 +1546,23 @@ type AccountSettings struct { // RegularUsersViewBlocked Allows blocking regular users from viewing parts of the system. RegularUsersViewBlocked bool `json:"regular_users_view_blocked"` + // RelayTimeoutSeconds Default relay-worker idle timeout in seconds. 0 = never tear + // down. Effective in p2p-lazy and p2p-dynamic modes. Backwards- + // compat alias for NB_LAZY_CONN_INACTIVITY_THRESHOLD on the + // client. NULL means "use built-in default" (5 minutes). 
+ RelayTimeoutSeconds *int64 `json:"relay_timeout_seconds,omitempty"` + // RoutingPeerDnsResolutionEnabled Enables or disables DNS resolution on the routing peers RoutingPeerDnsResolutionEnabled *bool `json:"routing_peer_dns_resolution_enabled,omitempty"` } +// AccountSettingsConnectionMode Account-wide default peer-connection mode. NULL means +// "fall back to lazy_connection_enabled" for backwards compatibility. +// Phase 1 of issue #5989: relay-forced, p2p, and p2p-lazy are +// functional. p2p-dynamic is reserved (passes through as p2p in +// Phase 1; will become functional in Phase 2). +type AccountSettingsConnectionMode string + // AvailablePorts defines model for AvailablePorts. type AvailablePorts struct { // Tcp Number of available TCP ports left on the ingress peer @@ -1626,7 +1681,9 @@ type Checks struct { // OsVersionCheck Posture check for the version of operating system OsVersionCheck *OSVersionCheck `json:"os_version_check,omitempty"` - // PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. + // PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it + // contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, + // so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. PeerNetworkRangeCheck *PeerNetworkRangeCheck `json:"peer_network_range_check,omitempty"` // ProcessCheck Posture Check for binaries exist and are running in the peer’s system @@ -3312,7 +3369,9 @@ type PeerMinimum struct { Name string `json:"name"` } -// PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. 
A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. +// PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it +// contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, +// so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. type PeerNetworkRangeCheck struct { // Action Action to take upon policy match Action PeerNetworkRangeCheckAction `json:"action"` @@ -4761,6 +4820,12 @@ type ZoneRequest struct { // Conflict Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided. type Conflict = ErrorResponse +// bearerAuthContextKey is the context key for BearerAuth security scheme +type bearerAuthContextKey string + +// tokenAuthContextKey is the context key for TokenAuth security scheme +type tokenAuthContextKey string + // GetApiEventsNetworkTrafficParams defines parameters for GetApiEventsNetworkTraffic. type GetApiEventsNetworkTrafficParams struct { // Page Page number diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 604f9c79385..757c4df0c9d 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -71,6 +71,66 @@ func (JobStatus) EnumDescriptor() ([]byte, []int) { return file_management_proto_rawDescGZIP(), []int{0} } +// ConnectionMode controls how a peer establishes connections to other peers. +// Added in Phase 1 of the connection-mode consolidation (see issue #5989). 
+// CONNECTION_MODE_UNSPECIFIED is the proto default and means "fall back to +// the legacy LazyConnectionEnabled boolean field" -- required for backwards +// compatibility with old management servers that don't set this field. +type ConnectionMode int32 + +const ( + ConnectionMode_CONNECTION_MODE_UNSPECIFIED ConnectionMode = 0 + ConnectionMode_CONNECTION_MODE_RELAY_FORCED ConnectionMode = 1 + ConnectionMode_CONNECTION_MODE_P2P ConnectionMode = 2 + ConnectionMode_CONNECTION_MODE_P2P_LAZY ConnectionMode = 3 + ConnectionMode_CONNECTION_MODE_P2P_DYNAMIC ConnectionMode = 4 +) + +// Enum value maps for ConnectionMode. +var ( + ConnectionMode_name = map[int32]string{ + 0: "CONNECTION_MODE_UNSPECIFIED", + 1: "CONNECTION_MODE_RELAY_FORCED", + 2: "CONNECTION_MODE_P2P", + 3: "CONNECTION_MODE_P2P_LAZY", + 4: "CONNECTION_MODE_P2P_DYNAMIC", + } + ConnectionMode_value = map[string]int32{ + "CONNECTION_MODE_UNSPECIFIED": 0, + "CONNECTION_MODE_RELAY_FORCED": 1, + "CONNECTION_MODE_P2P": 2, + "CONNECTION_MODE_P2P_LAZY": 3, + "CONNECTION_MODE_P2P_DYNAMIC": 4, + } +) + +func (x ConnectionMode) Enum() *ConnectionMode { + p := new(ConnectionMode) + *p = x + return p +} + +func (x ConnectionMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnectionMode) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[1].Descriptor() +} + +func (ConnectionMode) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[1] +} + +func (x ConnectionMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnectionMode.Descriptor instead. 
+func (ConnectionMode) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{1} +} + type RuleProtocol int32 const ( @@ -113,11 +173,11 @@ func (x RuleProtocol) String() string { } func (RuleProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[1].Descriptor() + return file_management_proto_enumTypes[2].Descriptor() } func (RuleProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[1] + return &file_management_proto_enumTypes[2] } func (x RuleProtocol) Number() protoreflect.EnumNumber { @@ -126,7 +186,7 @@ func (x RuleProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use RuleProtocol.Descriptor instead. func (RuleProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{1} + return file_management_proto_rawDescGZIP(), []int{2} } type RuleDirection int32 @@ -159,11 +219,11 @@ func (x RuleDirection) String() string { } func (RuleDirection) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[2].Descriptor() + return file_management_proto_enumTypes[3].Descriptor() } func (RuleDirection) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[2] + return &file_management_proto_enumTypes[3] } func (x RuleDirection) Number() protoreflect.EnumNumber { @@ -172,7 +232,7 @@ func (x RuleDirection) Number() protoreflect.EnumNumber { // Deprecated: Use RuleDirection.Descriptor instead. 
func (RuleDirection) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{2} + return file_management_proto_rawDescGZIP(), []int{3} } type RuleAction int32 @@ -205,11 +265,11 @@ func (x RuleAction) String() string { } func (RuleAction) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[3].Descriptor() + return file_management_proto_enumTypes[4].Descriptor() } func (RuleAction) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[3] + return &file_management_proto_enumTypes[4] } func (x RuleAction) Number() protoreflect.EnumNumber { @@ -218,7 +278,7 @@ func (x RuleAction) Number() protoreflect.EnumNumber { // Deprecated: Use RuleAction.Descriptor instead. func (RuleAction) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{3} + return file_management_proto_rawDescGZIP(), []int{4} } type ExposeProtocol int32 @@ -260,11 +320,11 @@ func (x ExposeProtocol) String() string { } func (ExposeProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[4].Descriptor() + return file_management_proto_enumTypes[5].Descriptor() } func (ExposeProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[4] + return &file_management_proto_enumTypes[5] } func (x ExposeProtocol) Number() protoreflect.EnumNumber { @@ -273,7 +333,62 @@ func (x ExposeProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use ExposeProtocol.Descriptor instead. func (ExposeProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{4} + return file_management_proto_rawDescGZIP(), []int{5} +} + +type ConnType int32 + +const ( + ConnType_CONN_TYPE_UNSPECIFIED ConnType = 0 + ConnType_CONN_TYPE_IDLE ConnType = 1 + ConnType_CONN_TYPE_CONNECTING ConnType = 2 + ConnType_CONN_TYPE_P2P ConnType = 3 + ConnType_CONN_TYPE_RELAYED ConnType = 4 +) + +// Enum value maps for ConnType. 
+var ( + ConnType_name = map[int32]string{ + 0: "CONN_TYPE_UNSPECIFIED", + 1: "CONN_TYPE_IDLE", + 2: "CONN_TYPE_CONNECTING", + 3: "CONN_TYPE_P2P", + 4: "CONN_TYPE_RELAYED", + } + ConnType_value = map[string]int32{ + "CONN_TYPE_UNSPECIFIED": 0, + "CONN_TYPE_IDLE": 1, + "CONN_TYPE_CONNECTING": 2, + "CONN_TYPE_P2P": 3, + "CONN_TYPE_RELAYED": 4, + } +) + +func (x ConnType) Enum() *ConnType { + p := new(ConnType) + *p = x + return p +} + +func (x ConnType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnType) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[6].Descriptor() +} + +func (ConnType) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[6] +} + +func (x ConnType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnType.Descriptor instead. +func (ConnType) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{6} } type HostConfig_Protocol int32 @@ -315,11 +430,11 @@ func (x HostConfig_Protocol) String() string { } func (HostConfig_Protocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[5].Descriptor() + return file_management_proto_enumTypes[7].Descriptor() } func (HostConfig_Protocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[5] + return &file_management_proto_enumTypes[7] } func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { @@ -358,11 +473,11 @@ func (x DeviceAuthorizationFlowProvider) String() string { } func (DeviceAuthorizationFlowProvider) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[6].Descriptor() + return file_management_proto_enumTypes[8].Descriptor() } func (DeviceAuthorizationFlowProvider) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[6] + return &file_management_proto_enumTypes[8] } func (x 
DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { @@ -790,6 +905,10 @@ type SyncResponse struct { NetworkMap *NetworkMap `protobuf:"bytes,5,opt,name=NetworkMap,proto3" json:"NetworkMap,omitempty"` // Posture checks to be evaluated by client Checks []*Checks `protobuf:"bytes,6,rep,name=Checks,proto3" json:"Checks,omitempty"` + // Phase 3.7i (#5989): on-demand refresh request for the peer's + // connection map. Peer responds via SyncPeerConnections RPC with + // in_response_to_nonce echoing this nonce. + SnapshotRequest *PeerSnapshotRequest `protobuf:"bytes,7,opt,name=snapshot_request,json=snapshotRequest,proto3" json:"snapshot_request,omitempty"` } func (x *SyncResponse) Reset() { @@ -866,6 +985,13 @@ func (x *SyncResponse) GetChecks() []*Checks { return nil } +func (x *SyncResponse) GetSnapshotRequest() *PeerSnapshotRequest { + if x != nil { + return x.SnapshotRequest + } + return nil +} + type SyncMetaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1363,6 +1489,12 @@ type PeerSystemMeta struct { Environment *Environment `protobuf:"bytes,15,opt,name=environment,proto3" json:"environment,omitempty"` Files []*File `protobuf:"bytes,16,rep,name=files,proto3" json:"files,omitempty"` Flags *Flags `protobuf:"bytes,17,opt,name=flags,proto3" json:"flags,omitempty"` + // Phase 3.7i (#5989): connection mode/timeouts this peer is actually + // running with. Mgmt copies into RemotePeerConfig of every other peer. 
+ EffectiveConnectionMode string `protobuf:"bytes,50,opt,name=effective_connection_mode,json=effectiveConnectionMode,proto3" json:"effective_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `protobuf:"varint,51,opt,name=effective_relay_timeout_secs,json=effectiveRelayTimeoutSecs,proto3" json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `protobuf:"varint,52,opt,name=effective_p2p_timeout_secs,json=effectiveP2pTimeoutSecs,proto3" json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `protobuf:"varint,53,opt,name=effective_p2p_retry_max_secs,json=effectiveP2pRetryMaxSecs,proto3" json:"effective_p2p_retry_max_secs,omitempty"` } func (x *PeerSystemMeta) Reset() { @@ -1516,6 +1648,34 @@ func (x *PeerSystemMeta) GetFlags() *Flags { return nil } +func (x *PeerSystemMeta) GetEffectiveConnectionMode() string { + if x != nil { + return x.EffectiveConnectionMode + } + return "" +} + +func (x *PeerSystemMeta) GetEffectiveRelayTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveRelayTimeoutSecs + } + return 0 +} + +func (x *PeerSystemMeta) GetEffectiveP2PTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveP2PTimeoutSecs + } + return 0 +} + +func (x *PeerSystemMeta) GetEffectiveP2PRetryMaxSecs() uint32 { + if x != nil { + return x.EffectiveP2PRetryMaxSecs + } + return 0 +} + type LoginResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2163,6 +2323,27 @@ type PeerConfig struct { Mtu int32 `protobuf:"varint,7,opt,name=mtu,proto3" json:"mtu,omitempty"` // Auto-update config AutoUpdate *AutoUpdateSettings `protobuf:"bytes,8,opt,name=autoUpdate,proto3" json:"autoUpdate,omitempty"` + // Connection-mode resolved by the management server. UNSPECIFIED = use + // legacy LazyConnectionEnabled fallback. Added in Phase 1 (#5989). 
+ ConnectionMode ConnectionMode `protobuf:"varint,11,opt,name=ConnectionMode,proto3,enum=management.ConnectionMode" json:"ConnectionMode,omitempty"` + // Idle timeout for the ICE worker in seconds. 0 = never tear down. + // Effective in p2p-dynamic mode (added in Phase 2). Sent unconditionally + // for forward-compat. Added in Phase 1 (#5989). + P2PTimeoutSeconds uint32 `protobuf:"varint,12,opt,name=P2pTimeoutSeconds,proto3" json:"P2pTimeoutSeconds,omitempty"` + // Idle timeout for the relay worker in seconds. 0 = never tear down. + // Effective in p2p-lazy and p2p-dynamic modes. Backwards-compat alias for + // NB_LAZY_CONN_INACTIVITY_THRESHOLD. Added in Phase 1 (#5989). + RelayTimeoutSeconds uint32 `protobuf:"varint,13,opt,name=RelayTimeoutSeconds,proto3" json:"RelayTimeoutSeconds,omitempty"` + // P2pRetryMaxSeconds is the maximum interval between P2P retry attempts + // after consecutive ICE failures, in seconds. Effective only in + // p2p-dynamic mode (added in Phase 3 of #5989). + // + // Wire-format semantics: + // + // 0 -> "not set"; daemon uses built-in default (15 min) + // max -> "user explicitly disabled backoff"; daemon disables backoff + // else -> seconds, capped at this value in the cenkalti/backoff schedule + P2PRetryMaxSeconds uint32 `protobuf:"varint,14,opt,name=P2pRetryMaxSeconds,proto3" json:"P2pRetryMaxSeconds,omitempty"` } func (x *PeerConfig) Reset() { @@ -2253,6 +2434,34 @@ func (x *PeerConfig) GetAutoUpdate() *AutoUpdateSettings { return nil } +func (x *PeerConfig) GetConnectionMode() ConnectionMode { + if x != nil { + return x.ConnectionMode + } + return ConnectionMode_CONNECTION_MODE_UNSPECIFIED +} + +func (x *PeerConfig) GetP2PTimeoutSeconds() uint32 { + if x != nil { + return x.P2PTimeoutSeconds + } + return 0 +} + +func (x *PeerConfig) GetRelayTimeoutSeconds() uint32 { + if x != nil { + return x.RelayTimeoutSeconds + } + return 0 +} + +func (x *PeerConfig) GetP2PRetryMaxSeconds() uint32 { + if x != nil { + return x.P2PRetryMaxSeconds + 
} + return 0 +} + type AutoUpdateSettings struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2597,6 +2806,36 @@ type RemotePeerConfig struct { // Peer fully qualified domain name Fqdn string `protobuf:"bytes,4,opt,name=fqdn,proto3" json:"fqdn,omitempty"` AgentVersion string `protobuf:"bytes,5,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + // Phase 3.7i (#5989): connection mode/timeouts the remote peer is + // actually running with (after env > local-cfg > server-pushed > legacy + // resolution), as reported by that peer in its PeerSystemMeta. Empty + // when remote peer pre-dates Phase 3.7i. + EffectiveConnectionMode string `protobuf:"bytes,6,opt,name=effective_connection_mode,json=effectiveConnectionMode,proto3" json:"effective_connection_mode,omitempty"` + EffectiveRelayTimeoutSecs uint32 `protobuf:"varint,7,opt,name=effective_relay_timeout_secs,json=effectiveRelayTimeoutSecs,proto3" json:"effective_relay_timeout_secs,omitempty"` + EffectiveP2PTimeoutSecs uint32 `protobuf:"varint,8,opt,name=effective_p2p_timeout_secs,json=effectiveP2pTimeoutSecs,proto3" json:"effective_p2p_timeout_secs,omitempty"` + EffectiveP2PRetryMaxSecs uint32 `protobuf:"varint,9,opt,name=effective_p2p_retry_max_secs,json=effectiveP2pRetryMaxSecs,proto3" json:"effective_p2p_retry_max_secs,omitempty"` + // Connection mode/timeouts the management server has configured for + // that peer via dashboard policy/group. UI compares effective vs + // configured to spot local overrides (≠ → ⚠). 
+ ConfiguredConnectionMode string `protobuf:"bytes,10,opt,name=configured_connection_mode,json=configuredConnectionMode,proto3" json:"configured_connection_mode,omitempty"` + ConfiguredRelayTimeoutSecs uint32 `protobuf:"varint,11,opt,name=configured_relay_timeout_secs,json=configuredRelayTimeoutSecs,proto3" json:"configured_relay_timeout_secs,omitempty"` + ConfiguredP2PTimeoutSecs uint32 `protobuf:"varint,12,opt,name=configured_p2p_timeout_secs,json=configuredP2pTimeoutSecs,proto3" json:"configured_p2p_timeout_secs,omitempty"` + ConfiguredP2PRetryMaxSecs uint32 `protobuf:"varint,13,opt,name=configured_p2p_retry_max_secs,json=configuredP2pRetryMaxSecs,proto3" json:"configured_p2p_retry_max_secs,omitempty"` + // Phase 3.7i: server-knowledge fields surfaced to UIs without an + // extra Mgmt API call (already in the NetworkMap stream context). + LastSeenAtServer *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=last_seen_at_server,json=lastSeenAtServer,proto3" json:"last_seen_at_server,omitempty"` + Groups []string `protobuf:"bytes,15,rep,name=groups,proto3" json:"groups,omitempty"` + // Live online flag: peer.Status.Connected on the management server. + // True = peer is currently heartbeating. False = peer hasn't checked + // in (hardware/network down) but its login is still valid (otherwise + // it would be in OfflinePeers, not RemotePeers). + LiveOnline bool `protobuf:"varint,16,opt,name=live_online,json=liveOnline,proto3" json:"live_online,omitempty"` + // Server-knowledge marker: true when the management server is new + // enough to populate live_online authoritatively. Old servers leave + // this field at false (default), and new clients then fall back to + // legacy heuristics (assume online when live_online is false but + // last_seen_at_server is also unset, i.e. nothing is known). 
+ ServerLivenessKnown bool `protobuf:"varint,17,opt,name=server_liveness_known,json=serverLivenessKnown,proto3" json:"server_liveness_known,omitempty"` } func (x *RemotePeerConfig) Reset() { @@ -2666,6 +2905,90 @@ func (x *RemotePeerConfig) GetAgentVersion() string { return "" } +func (x *RemotePeerConfig) GetEffectiveConnectionMode() string { + if x != nil { + return x.EffectiveConnectionMode + } + return "" +} + +func (x *RemotePeerConfig) GetEffectiveRelayTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveRelayTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetEffectiveP2PTimeoutSecs() uint32 { + if x != nil { + return x.EffectiveP2PTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetEffectiveP2PRetryMaxSecs() uint32 { + if x != nil { + return x.EffectiveP2PRetryMaxSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetConfiguredConnectionMode() string { + if x != nil { + return x.ConfiguredConnectionMode + } + return "" +} + +func (x *RemotePeerConfig) GetConfiguredRelayTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredRelayTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetConfiguredP2PTimeoutSecs() uint32 { + if x != nil { + return x.ConfiguredP2PTimeoutSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetConfiguredP2PRetryMaxSecs() uint32 { + if x != nil { + return x.ConfiguredP2PRetryMaxSecs + } + return 0 +} + +func (x *RemotePeerConfig) GetLastSeenAtServer() *timestamppb.Timestamp { + if x != nil { + return x.LastSeenAtServer + } + return nil +} + +func (x *RemotePeerConfig) GetGroups() []string { + if x != nil { + return x.Groups + } + return nil +} + +func (x *RemotePeerConfig) GetLiveOnline() bool { + if x != nil { + return x.LiveOnline + } + return false +} + +func (x *RemotePeerConfig) GetServerLivenessKnown() bool { + if x != nil { + return x.ServerLivenessKnown + } + return false +} + // SSHConfig represents SSH configurations of a peer. 
type SSHConfig struct { state protoimpl.MessageState @@ -4385,6 +4708,229 @@ func (*StopExposeResponse) Descriptor() ([]byte, []int) { return file_management_proto_rawDescGZIP(), []int{52} } +// Phase 3.7i (#5989): per-peer connection-state push payload (encrypted +// body of SyncPeerConnections request). +type PeerConnectionMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Seq uint64 `protobuf:"varint,1,opt,name=seq,proto3" json:"seq,omitempty"` + FullSnapshot bool `protobuf:"varint,2,opt,name=full_snapshot,json=fullSnapshot,proto3" json:"full_snapshot,omitempty"` + Entries []*PeerConnectionEntry `protobuf:"bytes,3,rep,name=entries,proto3" json:"entries,omitempty"` + InResponseToNonce uint64 `protobuf:"varint,4,opt,name=in_response_to_nonce,json=inResponseToNonce,proto3" json:"in_response_to_nonce,omitempty"` +} + +func (x *PeerConnectionMap) Reset() { + *x = PeerConnectionMap{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerConnectionMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerConnectionMap) ProtoMessage() {} + +func (x *PeerConnectionMap) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerConnectionMap.ProtoReflect.Descriptor instead. 
+func (*PeerConnectionMap) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{53} +} + +func (x *PeerConnectionMap) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +func (x *PeerConnectionMap) GetFullSnapshot() bool { + if x != nil { + return x.FullSnapshot + } + return false +} + +func (x *PeerConnectionMap) GetEntries() []*PeerConnectionEntry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *PeerConnectionMap) GetInResponseToNonce() uint64 { + if x != nil { + return x.InResponseToNonce + } + return 0 +} + +type PeerConnectionEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RemotePubkey string `protobuf:"bytes,1,opt,name=remote_pubkey,json=remotePubkey,proto3" json:"remote_pubkey,omitempty"` + ConnType ConnType `protobuf:"varint,2,opt,name=conn_type,json=connType,proto3,enum=management.ConnType" json:"conn_type,omitempty"` + LastHandshake *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_handshake,json=lastHandshake,proto3" json:"last_handshake,omitempty"` + LatencyMs uint32 `protobuf:"varint,4,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"` + Endpoint string `protobuf:"bytes,5,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + RelayServer string `protobuf:"bytes,6,opt,name=relay_server,json=relayServer,proto3" json:"relay_server,omitempty"` + RxBytes uint64 `protobuf:"varint,7,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + TxBytes uint64 `protobuf:"varint,8,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` +} + +func (x *PeerConnectionEntry) Reset() { + *x = PeerConnectionEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerConnectionEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*PeerConnectionEntry) ProtoMessage() {} + +func (x *PeerConnectionEntry) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerConnectionEntry.ProtoReflect.Descriptor instead. +func (*PeerConnectionEntry) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{54} +} + +func (x *PeerConnectionEntry) GetRemotePubkey() string { + if x != nil { + return x.RemotePubkey + } + return "" +} + +func (x *PeerConnectionEntry) GetConnType() ConnType { + if x != nil { + return x.ConnType + } + return ConnType_CONN_TYPE_UNSPECIFIED +} + +func (x *PeerConnectionEntry) GetLastHandshake() *timestamppb.Timestamp { + if x != nil { + return x.LastHandshake + } + return nil +} + +func (x *PeerConnectionEntry) GetLatencyMs() uint32 { + if x != nil { + return x.LatencyMs + } + return 0 +} + +func (x *PeerConnectionEntry) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + +func (x *PeerConnectionEntry) GetRelayServer() string { + if x != nil { + return x.RelayServer + } + return "" +} + +func (x *PeerConnectionEntry) GetRxBytes() uint64 { + if x != nil { + return x.RxBytes + } + return 0 +} + +func (x *PeerConnectionEntry) GetTxBytes() uint64 { + if x != nil { + return x.TxBytes + } + return 0 +} + +type PeerSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (x *PeerSnapshotRequest) Reset() { + *x = PeerSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*PeerSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerSnapshotRequest) ProtoMessage() {} + +func (x *PeerSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerSnapshotRequest.ProtoReflect.Descriptor instead. +func (*PeerSnapshotRequest) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{55} +} + +func (x *PeerSnapshotRequest) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + type PortInfo_Range struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4397,7 +4943,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[54] + mi := &file_management_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4410,7 +4956,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[54] + mi := &file_management_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4489,7 +5035,7 @@ var file_management_proto_rawDesc = []byte{ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xdb, 0x02, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, + 0x61, 
0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xa7, 0x03, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, @@ -4511,626 +5057,762 @@ var file_management_proto_rawDesc = []byte{ 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, - 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x74, - 0x75, 0x70, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x74, - 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x18, 
0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, - 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x22, 0x44, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, - 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, - 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x77, - 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, - 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, - 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, - 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, - 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, - 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, - 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, 0x2a, - 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, - 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x12, 0x28, - 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, - 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 
0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, - 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, - 0x50, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x12, 0x42, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x4a, 0x0a, 0x10, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x52, 0x0f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, + 
0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, + 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, + 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, + 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x44, 0x0a, + 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, + 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, + 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, + 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, + 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x10, + 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, + 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, + 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 
0x74, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x12, 0x28, 0x0a, 0x0f, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x22, 0x0a, 0x0c, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, + 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x0d, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, + 0x54, 0x50, 0x12, 0x42, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, + 0x6f, 0x63, 
0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, - 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x44, 0x0a, 0x1d, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, - 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x12, 0x26, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, - 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x22, 0xf2, 0x04, 0x0a, 0x0e, 0x50, 0x65, 0x65, - 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x68, - 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, - 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x12, 0x16, 0x0a, 0x06, 0x6b, - 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x65, 0x72, - 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x4f, 0x53, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, 0x74, - 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x75, - 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x6b, 0x65, 0x72, - 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1c, 0x0a, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x44, 0x0a, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, + 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x0a, 0x0e, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, + 0x41, 0x75, 0x74, 0x68, 0x22, 0xec, 0x06, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 
0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x12, 0x16, 0x0a, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, + 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, + 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x4f, 0x53, 0x12, + 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x69, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x69, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4f, + 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x18, 0x0b, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, - 0x26, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, - 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, - 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, - 0x72, 0x12, 0x39, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, - 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x05, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0xb4, 0x01, - 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x73, + 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, + 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, + 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x12, 0x39, 0x0a, + 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x76, + 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x12, 0x27, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x61, + 0x67, 
0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x32, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x33, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, + 0x73, 0x65, 0x63, 0x73, 0x18, 0x34, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, + 0x65, 0x63, 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, + 0x65, 0x63, 0x73, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, + 0x65, 0x63, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, + 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x38, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, + 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x2c, 0x0a, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, + 0x0a, 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x1f, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, + 0x74, 0x75, 0x72, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, + 0x65, 0x6c, 0x61, 0x79, 0x12, 0x2a, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, + 0x22, 0x98, 0x01, 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x69, 0x12, 0x3b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, + 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, + 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x48, 0x54, 0x54, 0x50, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, + 0x03, 0x12, 0x08, 
0x0a, 0x04, 0x44, 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, + 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, + 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, + 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, + 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 
0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, + 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, + 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, + 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, + 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, + 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 
0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, + 0xb3, 0x04, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, + 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, + 0x71, 0x64, 0x6e, 0x12, 0x48, 0x0a, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, + 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, + 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, + 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, + 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x10, 
0x0a, 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x6d, 0x74, 0x75, 0x12, 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x32, 0x70, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x61, 0x79, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x32, 0x70, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, + 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, + 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x22, 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 
0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, + 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, - 0x07, 0x0a, 
0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, - 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x05, 0x73, 0x74, - 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x05, 0x74, 0x75, 0x72, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x12, - 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, - 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6c, 0x61, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x2a, - 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x98, 0x01, 0x0a, 0x0a, 0x48, - 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x08, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 
0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x02, - 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x44, - 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 
0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2e, - 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x78, 0x69, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, - 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, - 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, - 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6b, 0x65, - 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, - 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x61, 0x75, 0x64, 0x69, 
0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, - 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x68, - 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x02, 0x0a, 0x0a, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, - 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x48, 0x0a, - 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 
0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, - 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a, - 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6d, 0x74, 0x75, 0x12, - 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, - 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, - 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, - 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, - 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x4e, - 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 
0x6e, 0x74, 0x2e, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, - 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, - 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x1a, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, - 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0f, 0x66, 0x6f, 0x72, - 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0f, - 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, - 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 
0x74, 0x2e, 0x53, 0x53, - 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x22, 0x82, - 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x55, 0x73, - 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x28, 0x0a, 0x0f, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, - 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, - 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, - 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x65, 
0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, - 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, - 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, - 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, - 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, - 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x22, 0x0a, - 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, - 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, - 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x09, - 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x57, 0x54, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 
0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, - 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, + 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, + 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x1c, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, + 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, + 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, + 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x3e, 0x0a, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, + 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x44, 0x0a, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x18, 0x0c, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, + 0x41, 0x75, 0x74, 0x68, 0x22, 0x82, 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, + 0x12, 0x20, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, + 0x55, 0x73, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, + 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, + 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, + 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, + 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 
0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, + 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, + 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, + 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x22, 0xef, 0x06, 0x0a, 0x10, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, + 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, + 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, + 0x71, 0x64, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 
0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, + 0x65, 0x63, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x53, 0x65, 0x63, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, + 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, + 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, + 0x32, 0x70, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, + 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, + 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x41, 0x0a, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x6c, 0x61, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, + 0x63, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, + 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, + 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x64, 0x50, 0x32, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, + 0x73, 0x12, 0x40, 0x0a, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x5f, + 0x70, 0x32, 0x70, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, + 0x63, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x65, 0x64, 0x50, 0x32, 0x70, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x53, + 0x65, 0x63, 0x73, 0x12, 0x49, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, + 0x5f, 0x61, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x6c, 0x61, + 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, + 0x0a, 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x6f, + 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x6c, 0x69, 0x76, + 0x65, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, + 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, + 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 
0x73, 0x73, 0x68, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, + 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, + 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, + 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, - 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, - 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x4f, 0x53, - 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x42, - 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0xbc, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x12, 0x26, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, - 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, - 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, - 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, - 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 
0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, - 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, - 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, + 0x0a, 0x17, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, + 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x4f, 0x53, 0x54, 0x45, 0x44, 0x10, 
0x00, 0x22, + 0x1e, 0x0a, 0x1c, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x5b, 0x0a, 0x15, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbc, 0x03, 0x0a, + 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0c, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, + 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x45, 0x6e, 0x64, 0x70, 
0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, - 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, - 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 
0x12, 0x16, 0x0a, 0x06, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, - 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, - 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, - 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 
0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, - 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, - 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, - 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, - 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, - 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, - 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, - 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, - 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, - 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, - 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, - 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 
0x06, 0x50, 0x65, - 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, - 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, - 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, - 0x61, 0x63, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, - 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, - 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, - 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, - 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, - 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, - 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 
0x52, - 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 
0x6f, 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, - 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, - 0x73, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, - 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, - 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, - 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, - 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, - 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, - 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, - 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, - 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, - 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, - 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, - 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, - 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, - 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, - 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, - 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, - 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, - 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, - 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, - 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, - 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, - 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 
0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, - 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, - 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, - 0x44, 0x50, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, - 0x4c, 0x53, 0x10, 0x04, 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, - 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, - 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, + 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, + 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, + 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, + 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, + 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, + 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 
0x4e, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, + 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, + 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, + 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, + 0x0c, 0x53, 0x69, 
0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, + 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, + 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, + 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 
0x0a, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, + 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, + 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, + 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x07, 
0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, + 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, + 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, + 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, + 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x22, 
0xf2, 0x01, 0x0a, 0x0e, 0x46, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, + 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, + 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x22, + 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x73, 
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x01, + 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, + 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 
0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, + 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, + 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x11, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x70, 0x12, + 0x10, 0x0a, 0x03, 0x73, 0x65, 0x71, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x65, + 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x39, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x74, 0x6f, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 
0x28, 0x04, 0x52, + 0x11, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x6f, 0x4e, 0x6f, 0x6e, + 0x63, 0x65, 0x22, 0xc4, 0x02, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, + 0x31, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x6e, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x41, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x5f, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x4d, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x19, + 0x0a, 0x08, 0x74, 
0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x07, 0x74, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x50, 0x65, 0x65, + 0x72, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x10, 0x02, 0x2a, 0xab, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x52, 0x45, 0x4c, 0x41, 0x59, 0x5f, + 0x46, 0x4f, 0x52, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x10, + 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x5f, 0x4c, 0x41, 0x5a, 0x59, 0x10, 0x03, 0x12, + 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, + 0x44, 0x45, 0x5f, 0x50, 0x32, 0x50, 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x04, + 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 
0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, + 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, + 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, + 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, + 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, + 0x4f, 0x50, 0x10, 0x01, 0x2a, 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, + 0x5f, 0x48, 0x54, 0x54, 0x50, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, + 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, + 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, + 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, + 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x4c, 0x53, 0x10, 0x04, 0x2a, 0x7d, 0x0a, 0x08, 0x43, 0x6f, 0x6e, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x44, + 0x4c, 0x45, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, + 0x0a, 0x0d, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x50, 0x32, 0x50, 
0x10, + 0x03, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, + 0x45, 0x4c, 0x41, 0x59, 0x45, 0x44, 0x10, 0x04, 0x32, 0xc7, 0x07, 0x0a, 0x11, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, + 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, - 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, + 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 
0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, - 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, + 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, - 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 
0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, + 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, - 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, - 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x13, 0x53, + 0x79, 0x6e, 0x63, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 
0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, + 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, - 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 
0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, - 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x65, 0x6e, + 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, - 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, + 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5145,166 +5827,179 @@ func 
file_management_proto_rawDescGZIP() []byte { return file_management_proto_rawDescData } -var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 55) +var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 9) +var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 58) var file_management_proto_goTypes = []interface{}{ (JobStatus)(0), // 0: management.JobStatus - (RuleProtocol)(0), // 1: management.RuleProtocol - (RuleDirection)(0), // 2: management.RuleDirection - (RuleAction)(0), // 3: management.RuleAction - (ExposeProtocol)(0), // 4: management.ExposeProtocol - (HostConfig_Protocol)(0), // 5: management.HostConfig.Protocol - (DeviceAuthorizationFlowProvider)(0), // 6: management.DeviceAuthorizationFlow.provider - (*EncryptedMessage)(nil), // 7: management.EncryptedMessage - (*JobRequest)(nil), // 8: management.JobRequest - (*JobResponse)(nil), // 9: management.JobResponse - (*BundleParameters)(nil), // 10: management.BundleParameters - (*BundleResult)(nil), // 11: management.BundleResult - (*SyncRequest)(nil), // 12: management.SyncRequest - (*SyncResponse)(nil), // 13: management.SyncResponse - (*SyncMetaRequest)(nil), // 14: management.SyncMetaRequest - (*LoginRequest)(nil), // 15: management.LoginRequest - (*PeerKeys)(nil), // 16: management.PeerKeys - (*Environment)(nil), // 17: management.Environment - (*File)(nil), // 18: management.File - (*Flags)(nil), // 19: management.Flags - (*PeerSystemMeta)(nil), // 20: management.PeerSystemMeta - (*LoginResponse)(nil), // 21: management.LoginResponse - (*ServerKeyResponse)(nil), // 22: management.ServerKeyResponse - (*Empty)(nil), // 23: management.Empty - (*NetbirdConfig)(nil), // 24: management.NetbirdConfig - (*HostConfig)(nil), // 25: management.HostConfig - (*RelayConfig)(nil), // 26: management.RelayConfig - (*FlowConfig)(nil), // 27: management.FlowConfig - (*JWTConfig)(nil), // 28: management.JWTConfig - 
(*ProtectedHostConfig)(nil), // 29: management.ProtectedHostConfig - (*PeerConfig)(nil), // 30: management.PeerConfig - (*AutoUpdateSettings)(nil), // 31: management.AutoUpdateSettings - (*NetworkMap)(nil), // 32: management.NetworkMap - (*SSHAuth)(nil), // 33: management.SSHAuth - (*MachineUserIndexes)(nil), // 34: management.MachineUserIndexes - (*RemotePeerConfig)(nil), // 35: management.RemotePeerConfig - (*SSHConfig)(nil), // 36: management.SSHConfig - (*DeviceAuthorizationFlowRequest)(nil), // 37: management.DeviceAuthorizationFlowRequest - (*DeviceAuthorizationFlow)(nil), // 38: management.DeviceAuthorizationFlow - (*PKCEAuthorizationFlowRequest)(nil), // 39: management.PKCEAuthorizationFlowRequest - (*PKCEAuthorizationFlow)(nil), // 40: management.PKCEAuthorizationFlow - (*ProviderConfig)(nil), // 41: management.ProviderConfig - (*Route)(nil), // 42: management.Route - (*DNSConfig)(nil), // 43: management.DNSConfig - (*CustomZone)(nil), // 44: management.CustomZone - (*SimpleRecord)(nil), // 45: management.SimpleRecord - (*NameServerGroup)(nil), // 46: management.NameServerGroup - (*NameServer)(nil), // 47: management.NameServer - (*FirewallRule)(nil), // 48: management.FirewallRule - (*NetworkAddress)(nil), // 49: management.NetworkAddress - (*Checks)(nil), // 50: management.Checks - (*PortInfo)(nil), // 51: management.PortInfo - (*RouteFirewallRule)(nil), // 52: management.RouteFirewallRule - (*ForwardingRule)(nil), // 53: management.ForwardingRule - (*ExposeServiceRequest)(nil), // 54: management.ExposeServiceRequest - (*ExposeServiceResponse)(nil), // 55: management.ExposeServiceResponse - (*RenewExposeRequest)(nil), // 56: management.RenewExposeRequest - (*RenewExposeResponse)(nil), // 57: management.RenewExposeResponse - (*StopExposeRequest)(nil), // 58: management.StopExposeRequest - (*StopExposeResponse)(nil), // 59: management.StopExposeResponse - nil, // 60: management.SSHAuth.MachineUsersEntry - (*PortInfo_Range)(nil), // 61: 
management.PortInfo.Range - (*timestamppb.Timestamp)(nil), // 62: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 63: google.protobuf.Duration + (ConnectionMode)(0), // 1: management.ConnectionMode + (RuleProtocol)(0), // 2: management.RuleProtocol + (RuleDirection)(0), // 3: management.RuleDirection + (RuleAction)(0), // 4: management.RuleAction + (ExposeProtocol)(0), // 5: management.ExposeProtocol + (ConnType)(0), // 6: management.ConnType + (HostConfig_Protocol)(0), // 7: management.HostConfig.Protocol + (DeviceAuthorizationFlowProvider)(0), // 8: management.DeviceAuthorizationFlow.provider + (*EncryptedMessage)(nil), // 9: management.EncryptedMessage + (*JobRequest)(nil), // 10: management.JobRequest + (*JobResponse)(nil), // 11: management.JobResponse + (*BundleParameters)(nil), // 12: management.BundleParameters + (*BundleResult)(nil), // 13: management.BundleResult + (*SyncRequest)(nil), // 14: management.SyncRequest + (*SyncResponse)(nil), // 15: management.SyncResponse + (*SyncMetaRequest)(nil), // 16: management.SyncMetaRequest + (*LoginRequest)(nil), // 17: management.LoginRequest + (*PeerKeys)(nil), // 18: management.PeerKeys + (*Environment)(nil), // 19: management.Environment + (*File)(nil), // 20: management.File + (*Flags)(nil), // 21: management.Flags + (*PeerSystemMeta)(nil), // 22: management.PeerSystemMeta + (*LoginResponse)(nil), // 23: management.LoginResponse + (*ServerKeyResponse)(nil), // 24: management.ServerKeyResponse + (*Empty)(nil), // 25: management.Empty + (*NetbirdConfig)(nil), // 26: management.NetbirdConfig + (*HostConfig)(nil), // 27: management.HostConfig + (*RelayConfig)(nil), // 28: management.RelayConfig + (*FlowConfig)(nil), // 29: management.FlowConfig + (*JWTConfig)(nil), // 30: management.JWTConfig + (*ProtectedHostConfig)(nil), // 31: management.ProtectedHostConfig + (*PeerConfig)(nil), // 32: management.PeerConfig + (*AutoUpdateSettings)(nil), // 33: management.AutoUpdateSettings + (*NetworkMap)(nil), // 
34: management.NetworkMap + (*SSHAuth)(nil), // 35: management.SSHAuth + (*MachineUserIndexes)(nil), // 36: management.MachineUserIndexes + (*RemotePeerConfig)(nil), // 37: management.RemotePeerConfig + (*SSHConfig)(nil), // 38: management.SSHConfig + (*DeviceAuthorizationFlowRequest)(nil), // 39: management.DeviceAuthorizationFlowRequest + (*DeviceAuthorizationFlow)(nil), // 40: management.DeviceAuthorizationFlow + (*PKCEAuthorizationFlowRequest)(nil), // 41: management.PKCEAuthorizationFlowRequest + (*PKCEAuthorizationFlow)(nil), // 42: management.PKCEAuthorizationFlow + (*ProviderConfig)(nil), // 43: management.ProviderConfig + (*Route)(nil), // 44: management.Route + (*DNSConfig)(nil), // 45: management.DNSConfig + (*CustomZone)(nil), // 46: management.CustomZone + (*SimpleRecord)(nil), // 47: management.SimpleRecord + (*NameServerGroup)(nil), // 48: management.NameServerGroup + (*NameServer)(nil), // 49: management.NameServer + (*FirewallRule)(nil), // 50: management.FirewallRule + (*NetworkAddress)(nil), // 51: management.NetworkAddress + (*Checks)(nil), // 52: management.Checks + (*PortInfo)(nil), // 53: management.PortInfo + (*RouteFirewallRule)(nil), // 54: management.RouteFirewallRule + (*ForwardingRule)(nil), // 55: management.ForwardingRule + (*ExposeServiceRequest)(nil), // 56: management.ExposeServiceRequest + (*ExposeServiceResponse)(nil), // 57: management.ExposeServiceResponse + (*RenewExposeRequest)(nil), // 58: management.RenewExposeRequest + (*RenewExposeResponse)(nil), // 59: management.RenewExposeResponse + (*StopExposeRequest)(nil), // 60: management.StopExposeRequest + (*StopExposeResponse)(nil), // 61: management.StopExposeResponse + (*PeerConnectionMap)(nil), // 62: management.PeerConnectionMap + (*PeerConnectionEntry)(nil), // 63: management.PeerConnectionEntry + (*PeerSnapshotRequest)(nil), // 64: management.PeerSnapshotRequest + nil, // 65: management.SSHAuth.MachineUsersEntry + (*PortInfo_Range)(nil), // 66: management.PortInfo.Range + 
(*timestamppb.Timestamp)(nil), // 67: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 68: google.protobuf.Duration } var file_management_proto_depIdxs = []int32{ - 10, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters + 12, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters 0, // 1: management.JobResponse.status:type_name -> management.JobStatus - 11, // 2: management.JobResponse.bundle:type_name -> management.BundleResult - 20, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta - 24, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig - 30, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig - 35, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig - 32, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap - 50, // 8: management.SyncResponse.Checks:type_name -> management.Checks - 20, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta - 20, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta - 16, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys - 49, // 12: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress - 17, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment - 18, // 14: management.PeerSystemMeta.files:type_name -> management.File - 19, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags - 24, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig - 30, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig - 50, // 18: management.LoginResponse.Checks:type_name -> management.Checks - 62, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp - 25, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig - 29, // 21: 
management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig - 25, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig - 26, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig - 27, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig - 5, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol - 63, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration - 25, // 27: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig - 36, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig - 31, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings - 30, // 30: management.NetworkMap.peerConfig:type_name -> management.PeerConfig - 35, // 31: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig - 42, // 32: management.NetworkMap.Routes:type_name -> management.Route - 43, // 33: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig - 35, // 34: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig - 48, // 35: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule - 52, // 36: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule - 53, // 37: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule - 33, // 38: management.NetworkMap.sshAuth:type_name -> management.SSHAuth - 60, // 39: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry - 36, // 40: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig - 28, // 41: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig - 6, // 42: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider - 41, // 43: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 41, // 44: 
management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 46, // 45: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup - 44, // 46: management.DNSConfig.CustomZones:type_name -> management.CustomZone - 45, // 47: management.CustomZone.Records:type_name -> management.SimpleRecord - 47, // 48: management.NameServerGroup.NameServers:type_name -> management.NameServer - 2, // 49: management.FirewallRule.Direction:type_name -> management.RuleDirection - 3, // 50: management.FirewallRule.Action:type_name -> management.RuleAction - 1, // 51: management.FirewallRule.Protocol:type_name -> management.RuleProtocol - 51, // 52: management.FirewallRule.PortInfo:type_name -> management.PortInfo - 61, // 53: management.PortInfo.range:type_name -> management.PortInfo.Range - 3, // 54: management.RouteFirewallRule.action:type_name -> management.RuleAction - 1, // 55: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol - 51, // 56: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo - 1, // 57: management.ForwardingRule.protocol:type_name -> management.RuleProtocol - 51, // 58: management.ForwardingRule.destinationPort:type_name -> management.PortInfo - 51, // 59: management.ForwardingRule.translatedPort:type_name -> management.PortInfo - 4, // 60: management.ExposeServiceRequest.protocol:type_name -> management.ExposeProtocol - 34, // 61: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes - 7, // 62: management.ManagementService.Login:input_type -> management.EncryptedMessage - 7, // 63: management.ManagementService.Sync:input_type -> management.EncryptedMessage - 23, // 64: management.ManagementService.GetServerKey:input_type -> management.Empty - 23, // 65: management.ManagementService.isHealthy:input_type -> management.Empty - 7, // 66: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage - 7, // 67: 
management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage - 7, // 68: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage - 7, // 69: management.ManagementService.Logout:input_type -> management.EncryptedMessage - 7, // 70: management.ManagementService.Job:input_type -> management.EncryptedMessage - 7, // 71: management.ManagementService.CreateExpose:input_type -> management.EncryptedMessage - 7, // 72: management.ManagementService.RenewExpose:input_type -> management.EncryptedMessage - 7, // 73: management.ManagementService.StopExpose:input_type -> management.EncryptedMessage - 7, // 74: management.ManagementService.Login:output_type -> management.EncryptedMessage - 7, // 75: management.ManagementService.Sync:output_type -> management.EncryptedMessage - 22, // 76: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse - 23, // 77: management.ManagementService.isHealthy:output_type -> management.Empty - 7, // 78: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage - 7, // 79: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage - 23, // 80: management.ManagementService.SyncMeta:output_type -> management.Empty - 23, // 81: management.ManagementService.Logout:output_type -> management.Empty - 7, // 82: management.ManagementService.Job:output_type -> management.EncryptedMessage - 7, // 83: management.ManagementService.CreateExpose:output_type -> management.EncryptedMessage - 7, // 84: management.ManagementService.RenewExpose:output_type -> management.EncryptedMessage - 7, // 85: management.ManagementService.StopExpose:output_type -> management.EncryptedMessage - 74, // [74:86] is the sub-list for method output_type - 62, // [62:74] is the sub-list for method input_type - 62, // [62:62] is the sub-list for extension type_name - 62, // [62:62] is the sub-list for extension extendee - 
0, // [0:62] is the sub-list for field type_name + 13, // 2: management.JobResponse.bundle:type_name -> management.BundleResult + 22, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta + 26, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig + 32, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig + 37, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig + 34, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap + 52, // 8: management.SyncResponse.Checks:type_name -> management.Checks + 64, // 9: management.SyncResponse.snapshot_request:type_name -> management.PeerSnapshotRequest + 22, // 10: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta + 22, // 11: management.LoginRequest.meta:type_name -> management.PeerSystemMeta + 18, // 12: management.LoginRequest.peerKeys:type_name -> management.PeerKeys + 51, // 13: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress + 19, // 14: management.PeerSystemMeta.environment:type_name -> management.Environment + 20, // 15: management.PeerSystemMeta.files:type_name -> management.File + 21, // 16: management.PeerSystemMeta.flags:type_name -> management.Flags + 26, // 17: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig + 32, // 18: management.LoginResponse.peerConfig:type_name -> management.PeerConfig + 52, // 19: management.LoginResponse.Checks:type_name -> management.Checks + 67, // 20: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp + 27, // 21: management.NetbirdConfig.stuns:type_name -> management.HostConfig + 31, // 22: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig + 27, // 23: management.NetbirdConfig.signal:type_name -> management.HostConfig + 28, // 24: management.NetbirdConfig.relay:type_name -> management.RelayConfig + 29, // 25: 
management.NetbirdConfig.flow:type_name -> management.FlowConfig + 7, // 26: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol + 68, // 27: management.FlowConfig.interval:type_name -> google.protobuf.Duration + 27, // 28: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig + 38, // 29: management.PeerConfig.sshConfig:type_name -> management.SSHConfig + 33, // 30: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings + 1, // 31: management.PeerConfig.ConnectionMode:type_name -> management.ConnectionMode + 32, // 32: management.NetworkMap.peerConfig:type_name -> management.PeerConfig + 37, // 33: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig + 44, // 34: management.NetworkMap.Routes:type_name -> management.Route + 45, // 35: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig + 37, // 36: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig + 50, // 37: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule + 54, // 38: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule + 55, // 39: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule + 35, // 40: management.NetworkMap.sshAuth:type_name -> management.SSHAuth + 65, // 41: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry + 38, // 42: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig + 67, // 43: management.RemotePeerConfig.last_seen_at_server:type_name -> google.protobuf.Timestamp + 30, // 44: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig + 8, // 45: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider + 43, // 46: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 43, // 47: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> 
management.ProviderConfig + 48, // 48: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup + 46, // 49: management.DNSConfig.CustomZones:type_name -> management.CustomZone + 47, // 50: management.CustomZone.Records:type_name -> management.SimpleRecord + 49, // 51: management.NameServerGroup.NameServers:type_name -> management.NameServer + 3, // 52: management.FirewallRule.Direction:type_name -> management.RuleDirection + 4, // 53: management.FirewallRule.Action:type_name -> management.RuleAction + 2, // 54: management.FirewallRule.Protocol:type_name -> management.RuleProtocol + 53, // 55: management.FirewallRule.PortInfo:type_name -> management.PortInfo + 66, // 56: management.PortInfo.range:type_name -> management.PortInfo.Range + 4, // 57: management.RouteFirewallRule.action:type_name -> management.RuleAction + 2, // 58: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol + 53, // 59: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo + 2, // 60: management.ForwardingRule.protocol:type_name -> management.RuleProtocol + 53, // 61: management.ForwardingRule.destinationPort:type_name -> management.PortInfo + 53, // 62: management.ForwardingRule.translatedPort:type_name -> management.PortInfo + 5, // 63: management.ExposeServiceRequest.protocol:type_name -> management.ExposeProtocol + 63, // 64: management.PeerConnectionMap.entries:type_name -> management.PeerConnectionEntry + 6, // 65: management.PeerConnectionEntry.conn_type:type_name -> management.ConnType + 67, // 66: management.PeerConnectionEntry.last_handshake:type_name -> google.protobuf.Timestamp + 36, // 67: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes + 9, // 68: management.ManagementService.Login:input_type -> management.EncryptedMessage + 9, // 69: management.ManagementService.Sync:input_type -> management.EncryptedMessage + 25, // 70: management.ManagementService.GetServerKey:input_type -> 
management.Empty + 25, // 71: management.ManagementService.isHealthy:input_type -> management.Empty + 9, // 72: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage + 9, // 73: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage + 9, // 74: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage + 9, // 75: management.ManagementService.SyncPeerConnections:input_type -> management.EncryptedMessage + 9, // 76: management.ManagementService.Logout:input_type -> management.EncryptedMessage + 9, // 77: management.ManagementService.Job:input_type -> management.EncryptedMessage + 9, // 78: management.ManagementService.CreateExpose:input_type -> management.EncryptedMessage + 9, // 79: management.ManagementService.RenewExpose:input_type -> management.EncryptedMessage + 9, // 80: management.ManagementService.StopExpose:input_type -> management.EncryptedMessage + 9, // 81: management.ManagementService.Login:output_type -> management.EncryptedMessage + 9, // 82: management.ManagementService.Sync:output_type -> management.EncryptedMessage + 24, // 83: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse + 25, // 84: management.ManagementService.isHealthy:output_type -> management.Empty + 9, // 85: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage + 9, // 86: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage + 25, // 87: management.ManagementService.SyncMeta:output_type -> management.Empty + 25, // 88: management.ManagementService.SyncPeerConnections:output_type -> management.Empty + 25, // 89: management.ManagementService.Logout:output_type -> management.Empty + 9, // 90: management.ManagementService.Job:output_type -> management.EncryptedMessage + 9, // 91: management.ManagementService.CreateExpose:output_type -> management.EncryptedMessage 
+ 9, // 92: management.ManagementService.RenewExpose:output_type -> management.EncryptedMessage + 9, // 93: management.ManagementService.StopExpose:output_type -> management.EncryptedMessage + 81, // [81:94] is the sub-list for method output_type + 68, // [68:81] is the sub-list for method input_type + 68, // [68:68] is the sub-list for extension type_name + 68, // [68:68] is the sub-list for extension extendee + 0, // [0:68] is the sub-list for field type_name } func init() { file_management_proto_init() } @@ -5949,7 +6644,43 @@ func file_management_proto_init() { return nil } } + file_management_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerConnectionMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } file_management_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerConnectionEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PortInfo_Range); i { case 0: return &v.state @@ -5977,8 +6708,8 @@ func file_management_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_management_proto_rawDesc, - NumEnums: 7, - NumMessages: 55, + NumEnums: 9, + NumMessages: 58, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index 70a53067974..796319da94c 100644 --- a/shared/management/proto/management.proto +++ 
b/shared/management/proto/management.proto @@ -46,6 +46,12 @@ service ManagementService { // EncryptedMessage of the request has a body of Empty. rpc SyncMeta(EncryptedMessage) returns (Empty) {} + // Phase 3.7i (#5989): per-peer connection-state push from peer to + // mgmt. Body decrypts to PeerConnectionMap. Unary because the + // existing Sync is server-streaming (client cannot inject extra + // frames). + rpc SyncPeerConnections(EncryptedMessage) returns (Empty) {} + // Logout logs out the peer and removes it from the management server rpc Logout(EncryptedMessage) returns (Empty) {} @@ -133,6 +139,11 @@ message SyncResponse { // Posture checks to be evaluated by client repeated Checks Checks = 6; + + // Phase 3.7i (#5989): on-demand refresh request for the peer's + // connection map. Peer responds via SyncPeerConnections RPC with + // in_response_to_nonce echoing this nonce. + PeerSnapshotRequest snapshot_request = 7; } message SyncMetaRequest { @@ -221,6 +232,13 @@ message PeerSystemMeta { Environment environment = 15; repeated File files = 16; Flags flags = 17; + + // Phase 3.7i (#5989): connection mode/timeouts this peer is actually + // running with. Mgmt copies into RemotePeerConfig of every other peer. + string effective_connection_mode = 50; + uint32 effective_relay_timeout_secs = 51; + uint32 effective_p2p_timeout_secs = 52; + uint32 effective_p2p_retry_max_secs = 53; } message LoginResponse { @@ -335,6 +353,48 @@ message PeerConfig { // Auto-update config AutoUpdateSettings autoUpdate = 8; + + // Tags 9 and 10 are intentionally left unused so that future small + // additions can land without re-numbering the new connection-mode + // fields. Reserved here to make the gap explicit for any reviewer. + reserved 9, 10; + + // Connection-mode resolved by the management server. UNSPECIFIED = use + // legacy LazyConnectionEnabled fallback. Added in Phase 1 (#5989). + ConnectionMode ConnectionMode = 11; + + // Idle timeout for the ICE worker in seconds. 
0 = never tear down. + // Effective in p2p-dynamic mode (added in Phase 2). Sent unconditionally + // for forward-compat. Added in Phase 1 (#5989). + uint32 P2pTimeoutSeconds = 12; + + // Idle timeout for the relay worker in seconds. 0 = never tear down. + // Effective in p2p-lazy and p2p-dynamic modes. Backwards-compat alias for + // NB_LAZY_CONN_INACTIVITY_THRESHOLD. Added in Phase 1 (#5989). + uint32 RelayTimeoutSeconds = 13; + + // P2pRetryMaxSeconds is the maximum interval between P2P retry attempts + // after consecutive ICE failures, in seconds. Effective only in + // p2p-dynamic mode (added in Phase 3 of #5989). + // + // Wire-format semantics: + // 0 -> "not set"; daemon uses built-in default (15 min) + // max -> "user explicitly disabled backoff"; daemon disables backoff + // else -> seconds, capped at this value in the cenkalti/backoff schedule + uint32 P2pRetryMaxSeconds = 14; +} + +// ConnectionMode controls how a peer establishes connections to other peers. +// Added in Phase 1 of the connection-mode consolidation (see issue #5989). +// CONNECTION_MODE_UNSPECIFIED is the proto default and means "fall back to +// the legacy LazyConnectionEnabled boolean field" -- required for backwards +// compatibility with old management servers that don't set this field. +enum ConnectionMode { + CONNECTION_MODE_UNSPECIFIED = 0; + CONNECTION_MODE_RELAY_FORCED = 1; + CONNECTION_MODE_P2P = 2; + CONNECTION_MODE_P2P_LAZY = 3; + CONNECTION_MODE_P2P_DYNAMIC = 4; } message AutoUpdateSettings { @@ -421,6 +481,40 @@ message RemotePeerConfig { string fqdn = 4; string agentVersion = 5; + + // Phase 3.7i (#5989): connection mode/timeouts the remote peer is + // actually running with (after env > local-cfg > server-pushed > legacy + // resolution), as reported by that peer in its PeerSystemMeta. Empty + // when remote peer pre-dates Phase 3.7i. 
+ string effective_connection_mode = 6; + uint32 effective_relay_timeout_secs = 7; + uint32 effective_p2p_timeout_secs = 8; + uint32 effective_p2p_retry_max_secs = 9; + + // Connection mode/timeouts the management server has configured for + // that peer via dashboard policy/group. UI compares effective vs + // configured to spot local overrides (≠ → ⚠). + string configured_connection_mode = 10; + uint32 configured_relay_timeout_secs = 11; + uint32 configured_p2p_timeout_secs = 12; + uint32 configured_p2p_retry_max_secs = 13; + + // Phase 3.7i: server-knowledge fields surfaced to UIs without an + // extra Mgmt API call (already in the NetworkMap stream context). + google.protobuf.Timestamp last_seen_at_server = 14; + repeated string groups = 15; + // Live online flag: peer.Status.Connected on the management server. + // True = peer is currently heartbeating. False = peer hasn't checked + // in (hardware/network down) but its login is still valid (otherwise + // it would be in OfflinePeers, not RemotePeers). + bool live_online = 16; + + // Server-knowledge marker: true when the management server is new + // enough to populate live_online authoritatively. Old servers leave + // this field at false (default), and new clients then fall back to + // legacy heuristics (assume online when live_online is false but + // last_seen_at_server is also unset, i.e. nothing is known). + bool server_liveness_known = 17; } // SSHConfig represents SSH configurations of a peer. @@ -684,3 +778,35 @@ message StopExposeRequest { } message StopExposeResponse {} + +// Phase 3.7i (#5989): per-peer connection-state push payload (encrypted +// body of SyncPeerConnections request). 
+message PeerConnectionMap { + uint64 seq = 1; + bool full_snapshot = 2; + repeated PeerConnectionEntry entries = 3; + uint64 in_response_to_nonce = 4; +} + +message PeerConnectionEntry { + string remote_pubkey = 1; + ConnType conn_type = 2; + google.protobuf.Timestamp last_handshake = 3; + uint32 latency_ms = 4; + string endpoint = 5; + string relay_server = 6; + uint64 rx_bytes = 7; + uint64 tx_bytes = 8; +} + +enum ConnType { + CONN_TYPE_UNSPECIFIED = 0; + CONN_TYPE_IDLE = 1; + CONN_TYPE_CONNECTING = 2; + CONN_TYPE_P2P = 3; + CONN_TYPE_RELAYED = 4; +} + +message PeerSnapshotRequest { + uint64 nonce = 1; +} diff --git a/shared/management/proto/management_grpc.pb.go b/shared/management/proto/management_grpc.pb.go index 39a34204115..e6dd35bf1bb 100644 --- a/shared/management/proto/management_grpc.pb.go +++ b/shared/management/proto/management_grpc.pb.go @@ -48,6 +48,11 @@ type ManagementServiceClient interface { // sync meta will evaluate the checks and update the peer meta with the result. // EncryptedMessage of the request has a body of Empty. SyncMeta(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) + // Phase 3.7i (#5989): per-peer connection-state push from peer to + // mgmt. Body decrypts to PeerConnectionMap. Unary because the + // existing Sync is server-streaming (client cannot inject extra + // frames). 
+ SyncPeerConnections(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) // Executes a job on a target peer (e.g., debug bundle) @@ -154,6 +159,15 @@ func (c *managementServiceClient) SyncMeta(ctx context.Context, in *EncryptedMes return out, nil } +func (c *managementServiceClient) SyncPeerConnections(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/management.ManagementService/SyncPeerConnections", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *managementServiceClient) Logout(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/management.ManagementService/Logout", in, out, opts...) @@ -255,6 +269,11 @@ type ManagementServiceServer interface { // sync meta will evaluate the checks and update the peer meta with the result. // EncryptedMessage of the request has a body of Empty. SyncMeta(context.Context, *EncryptedMessage) (*Empty, error) + // Phase 3.7i (#5989): per-peer connection-state push from peer to + // mgmt. Body decrypts to PeerConnectionMap. Unary because the + // existing Sync is server-streaming (client cannot inject extra + // frames). 
+ SyncPeerConnections(context.Context, *EncryptedMessage) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(context.Context, *EncryptedMessage) (*Empty, error) // Executes a job on a target peer (e.g., debug bundle) @@ -293,6 +312,9 @@ func (UnimplementedManagementServiceServer) GetPKCEAuthorizationFlow(context.Con func (UnimplementedManagementServiceServer) SyncMeta(context.Context, *EncryptedMessage) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SyncMeta not implemented") } +func (UnimplementedManagementServiceServer) SyncPeerConnections(context.Context, *EncryptedMessage) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SyncPeerConnections not implemented") +} func (UnimplementedManagementServiceServer) Logout(context.Context, *EncryptedMessage) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") } @@ -450,6 +472,24 @@ func _ManagementService_SyncMeta_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _ManagementService_SyncPeerConnections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptedMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServiceServer).SyncPeerConnections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ManagementService/SyncPeerConnections", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServiceServer).SyncPeerConnections(ctx, req.(*EncryptedMessage)) + } + return interceptor(ctx, in, info, handler) +} + func _ManagementService_Logout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := 
new(EncryptedMessage) if err := dec(in); err != nil { @@ -579,6 +619,10 @@ var ManagementService_ServiceDesc = grpc.ServiceDesc{ MethodName: "SyncMeta", Handler: _ManagementService_SyncMeta_Handler, }, + { + MethodName: "SyncPeerConnections", + Handler: _ManagementService_SyncPeerConnections_Handler, + }, { MethodName: "Logout", Handler: _ManagementService_Logout_Handler,