Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion client/internal/expose/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func (m *Manager) Expose(ctx context.Context, req Request) (*Response, error) {
}

func (m *Manager) KeepAlive(ctx context.Context, domain string) error {
ticker := time.NewTicker(10 * time.Second)
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
defer m.stop(domain)

Expand Down
8 changes: 4 additions & 4 deletions management/internals/modules/reverseproxy/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ type Manager interface {
GetServiceByID(ctx context.Context, accountID, serviceID string) (*Service, error)
GetAccountServices(ctx context.Context, accountID string) ([]*Service, error)
GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error)
ValidateExposePermission(ctx context.Context, accountID, peerID string) error
CreateServiceFromPeer(ctx context.Context, accountID, peerID string, service *Service) (*Service, error)
DeleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error
ExpireServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error
CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *ExposeServiceRequest) (*ExposeServiceResponse, error)
RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error
StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error
StartExposeReaper(ctx context.Context)
}
112 changes: 55 additions & 57 deletions management/internals/modules/reverseproxy/interface_mock.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

163 changes: 163 additions & 0 deletions management/internals/modules/reverseproxy/manager/expose_tracker.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
package manager

import (
"context"
"sync"
"time"

"github.com/netbirdio/netbird/shared/management/status"
log "github.com/sirupsen/logrus"
)

const (
	// exposeTTL is how long an expose session may go without a renewal
	// before the reaper tears it down.
	exposeTTL = 90 * time.Second
	// exposeReapInterval is how often the background reaper scans the
	// tracked sessions for expired entries.
	exposeReapInterval = 30 * time.Second
	// maxExposesPerPeer caps the number of concurrent expose sessions a
	// single peer may hold.
	maxExposesPerPeer = 10
)

// trackedExpose is the in-memory state of one active expose session.
type trackedExpose struct {
	mu          sync.Mutex // guards lastRenewed and expiring
	domain      string     // exposed domain this session serves
	accountID   string     // account owning the exposed service
	peerID      string     // peer that created the expose
	lastRenewed time.Time  // last keep-alive; session expires once older than exposeTTL
	expiring    bool       // set by the reaper; blocks further renewals while teardown runs
}

// exposeTracker keeps the set of active expose sessions and enforces the
// per-peer session limit. Entries are keyed by exposeKey(peerID, domain);
// creations serialize on exposeCreateMu so the limit check stays atomic.
type exposeTracker struct {
	activeExposes  sync.Map   // exposeKey(peerID, domain) -> *trackedExpose
	exposeCreateMu sync.Mutex // serializes TrackExposeIfAllowed
	manager        *managerImpl
}

// exposeKey builds the tracking-map key for a peer/domain pair.
func exposeKey(peerID, domain string) string {
	const sep = ":"
	return peerID + sep + domain
}

// TrackExposeIfAllowed atomically checks the per-peer limit and registers a new
// active expose session under the same lock. Returns (true, false) if the expose
// was already tracked (duplicate), (false, true) if tracking succeeded, and
// (false, false) if the peer has reached the limit.
func (t *exposeTracker) TrackExposeIfAllowed(peerID, domain, accountID string) (alreadyTracked, ok bool) {
	t.exposeCreateMu.Lock()
	defer t.exposeCreateMu.Unlock()

	candidate := &trackedExpose{
		domain:      domain,
		accountID:   accountID,
		peerID:      peerID,
		lastRenewed: time.Now(),
	}

	key := exposeKey(peerID, domain)
	if _, existed := t.activeExposes.LoadOrStore(key, candidate); existed {
		// Duplicate: report it without touching the already-stored entry.
		return true, false
	}

	// The freshly stored entry counts toward the limit, so strictly-greater
	// means the peer went over it — roll the insertion back.
	if t.CountPeerExposes(peerID) > maxExposesPerPeer {
		t.activeExposes.Delete(key)
		return false, false
	}

	return false, true
}

// UntrackExpose removes an active expose session from tracking.
func (t *exposeTracker) UntrackExpose(peerID, domain string) {
	key := exposeKey(peerID, domain)
	t.activeExposes.Delete(key)
}

// CountPeerExposes returns the number of active expose sessions for a peer.
// peerID on a tracked entry is written once at creation, so reading it
// without the entry's mutex is safe.
func (t *exposeTracker) CountPeerExposes(peerID string) int {
	var n int
	t.activeExposes.Range(func(_, v any) bool {
		entry := v.(*trackedExpose)
		if entry.peerID == peerID {
			n++
		}
		return true
	})
	return n
}

// MaxExposesPerPeer reports the upper bound on concurrent expose sessions
// a single peer may hold.
func (t *exposeTracker) MaxExposesPerPeer() int {
	return maxExposesPerPeer
}

// RenewTrackedExpose refreshes the in-memory lastRenewed timestamp for a
// tracked expose. It reports false when the session is unknown or the reaper
// has already marked it for teardown.
func (t *exposeTracker) RenewTrackedExpose(peerID, domain string) bool {
	val, tracked := t.activeExposes.Load(exposeKey(peerID, domain))
	if !tracked {
		return false
	}

	expose := val.(*trackedExpose)
	expose.mu.Lock()
	defer expose.mu.Unlock()

	if expose.expiring {
		// The reaper claimed this session; refuse the renewal.
		return false
	}
	expose.lastRenewed = time.Now()
	return true
}

// StopTrackedExpose drops the expose session for the given peer/domain pair
// from tracking. It reports whether such a session was actually tracked.
func (t *exposeTracker) StopTrackedExpose(peerID, domain string) bool {
	_, wasTracked := t.activeExposes.LoadAndDelete(exposeKey(peerID, domain))
	return wasTracked
}

// StartExposeReaper starts a background goroutine that periodically reaps
// expired expose sessions. The goroutine exits when ctx is cancelled.
func (t *exposeTracker) StartExposeReaper(ctx context.Context) {
	go func() {
		tick := time.NewTicker(exposeReapInterval)
		defer tick.Stop()

		for {
			select {
			case <-tick.C:
				t.reapExpiredExposes()
			case <-ctx.Done():
				return
			}
		}
	}()
}

func (t *exposeTracker) reapExpiredExposes() {
t.activeExposes.Range(func(key, val any) bool {
expose := val.(*trackedExpose)
expose.mu.Lock()
expired := time.Since(expose.lastRenewed) > exposeTTL
if expired {
expose.expiring = true
}
expose.mu.Unlock()

if !expired {
return true
}

log.Infof("reaping expired expose session for peer %s, domain %s", expose.peerID, expose.domain)

err := t.manager.deleteServiceFromPeer(context.Background(), expose.accountID, expose.peerID, expose.domain, true)

s, _ := status.FromError(err)

switch {
case err == nil:
t.activeExposes.Delete(key)
case s.ErrorType == status.NotFound:
log.Debugf("service %s was already deleted", expose.domain)
default:
log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", expose.domain, err)
Comment on lines +148 to +158
Copy link
Copy Markdown
Contributor

@coderabbitai coderabbitai Bot Feb 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Guard status.FromError to avoid nil deref panic.

Line 150 assigns s without checking ok. If deleteServiceFromPeer returns a non-status.Error, s is nil and s.ErrorType will panic, crashing the reaper goroutine.

🛠️ Suggested fix
-		s, _ := status.FromError(err)
+		s, ok := status.FromError(err)

 		switch {
 		case err == nil:
 			t.activeExposes.Delete(key)
-		case s.ErrorType == status.NotFound:
+		case ok && s != nil && s.ErrorType == status.NotFound:
 			log.Debugf("service %s was already deleted", expose.domain)
 		default:
 			log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", expose.domain, err)
 		}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
err := t.manager.deleteServiceFromPeer(context.Background(), expose.accountID, expose.peerID, expose.domain, true)
s, _ := status.FromError(err)
switch {
case err == nil:
t.activeExposes.Delete(key)
case s.ErrorType == status.NotFound:
log.Debugf("service %s was already deleted", expose.domain)
default:
log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", expose.domain, err)
err := t.manager.deleteServiceFromPeer(context.Background(), expose.accountID, expose.peerID, expose.domain, true)
s, ok := status.FromError(err)
switch {
case err == nil:
t.activeExposes.Delete(key)
case ok && s != nil && s.ErrorType == status.NotFound:
log.Debugf("service %s was already deleted", expose.domain)
default:
log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", expose.domain, err)
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@management/internals/modules/reverseproxy/manager/expose_tracker.go` around
lines 148 - 158, The call to status.FromError is unguarded and s may be nil,
causing a panic when accessing s.ErrorType; update the error handling in the
block around t.manager.deleteServiceFromPeer(...) so you first check err: if err
== nil delete the key via t.activeExposes.Delete(key), otherwise call s,ok :=
status.FromError(err) and only inspect s.ErrorType when ok is true (handle
status.NotFound there); for non-status errors fall through to the default
logging branch that logs the error for expose.domain. Ensure you reference
t.manager.deleteServiceFromPeer, status.FromError, s (or s,ok),
t.activeExposes.Delete and expose.domain when making the change.

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

will fix in a new Pr

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@mlsmaycon, understood! Would you like me to create a GitHub issue to track this nil pointer dereference fix for the follow-up PR? I can include the context and suggested fix to make it easier to address later.

}

return true
})
}
Loading
Loading