diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 2a015e13e29..142820c20e6 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -114,7 +114,7 @@ jobs: strategy: fail-fast: false matrix: - os: [windows-2019, windows-2022] + os: [windows-2022, windows-2025] runs-on: ${{ matrix.os }} steps: - name: Checkout diff --git a/.goreleaser.yml b/.goreleaser.yml index 83f728e17f4..60ef70a82a3 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -21,7 +21,7 @@ builds: env: # This is the toolchain version we use for releases. To override, set the env var, e.g.: # GORELEASER_TOOLCHAIN="go1.22.8" TARGET='linux_amd64' goreleaser build --snapshot --clean --single-target - - GOTOOLCHAIN={{ envOrDefault "GORELEASER_TOOLCHAIN" "go1.24.3" }} + - GOTOOLCHAIN={{ envOrDefault "GORELEASER_TOOLCHAIN" "go1.24.4" }} - GO111MODULE=on - CGO_ENABLED=0 goos: diff --git a/conf/fuzz.go b/conf/fuzz.go index 2db114ce72c..e42a82e25de 100644 --- a/conf/fuzz.go +++ b/conf/fuzz.go @@ -1,4 +1,4 @@ -// Copyright 2020-2021 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/conf/parse.go b/conf/parse.go index c1f064ae756..3e52c7d2287 100644 --- a/conf/parse.go +++ b/conf/parse.go @@ -1,4 +1,4 @@ -// Copyright 2013-2024 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/go.mod b/go.mod index 7f3af70e802..f30b4e7e778 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/nats-io/nats-server/v2 go 1.23.0 -toolchain go1.23.9 +toolchain go1.23.10 require ( github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op @@ -10,11 +10,11 @@ require ( github.com/klauspost/compress v1.18.0 github.com/minio/highwayhash v1.0.3 github.com/nats-io/jwt/v2 v2.7.4 - github.com/nats-io/nats.go v1.42.0 + github.com/nats-io/nats.go v1.43.0 github.com/nats-io/nkeys v0.4.11 github.com/nats-io/nuid v1.0.1 go.uber.org/automaxprocs v1.6.0 - golang.org/x/crypto v0.38.0 + golang.org/x/crypto v0.39.0 golang.org/x/sys v0.33.0 - golang.org/x/time v0.11.0 + golang.org/x/time v0.12.0 ) diff --git a/go.sum b/go.sum index a33ad54f5a7..932b7a55073 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/nats-io/jwt/v2 v2.7.4 h1:jXFuDDxs/GQjGDZGhNgH4tXzSUK6WQi2rsj4xmsNOtI= github.com/nats-io/jwt/v2 v2.7.4/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= -github.com/nats-io/nats.go v1.42.0 h1:ynIMupIOvf/ZWH/b2qda6WGKGNSjwOUutTpWRvAmhaM= -github.com/nats-io/nats.go v1.42.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= +github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug= +github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -24,12 +24,12 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/testhelper/logging.go b/internal/testhelper/logging.go index 4397722e753..5fb7084f97a 100644 --- a/internal/testhelper/logging.go +++ b/internal/testhelper/logging.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/locksordering.txt b/locksordering.txt index 4a2c9e71721..7d750ac4a58 100644 --- a/locksordering.txt +++ b/locksordering.txt @@ -28,5 +28,9 @@ clearObserverState so that they cannot interleave which would leave Raft nodes i inconsistent observer states. 
jscmMu -> Account -> jsAccount - jscmMu -> stream.clsMu - jscmMu -> RaftNode \ No newline at end of file + jscmMu -> stream.clsMu + jscmMu -> RaftNode + +The "clsMu" lock protects the consumer list on a stream, used for signalling consumer activity. + + stream -> clsMu diff --git a/logger/log.go b/logger/log.go index 38473fd08a5..83889fb8bd4 100644 --- a/logger/log.go +++ b/logger/log.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/logger/log_test.go b/logger/log_test.go index 93d035df131..630fd803ed4 100644 --- a/logger/log_test.go +++ b/logger/log_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/logger/syslog.go b/logger/syslog.go index 211dd97cad8..0eb134ec5a4 100644 --- a/logger/syslog.go +++ b/logger/syslog.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/logger/syslog_test.go b/logger/syslog_test.go index b0e77f2a2d9..8a351870f67 100644 --- a/logger/syslog_test.go +++ b/logger/syslog_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2021 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/logger/syslog_windows.go b/logger/syslog_windows.go index c341a5d9695..176776b5b94 100644 --- a/logger/syslog_windows.go +++ b/logger/syslog_windows.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/logger/syslog_windows_test.go b/logger/syslog_windows_test.go index edd8e8ccb4c..6a3190bf38f 100644 --- a/logger/syslog_windows_test.go +++ b/logger/syslog_windows_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/main.go b/main.go index a85eab97004..42f270c542c 100644 --- a/main.go +++ b/main.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/scripts/updateCopyrights.sh b/scripts/updateCopyrights.sh index 522d4f7e65f..8c47c328042 100755 --- a/scripts/updateCopyrights.sh +++ b/scripts/updateCopyrights.sh @@ -9,8 +9,8 @@ git ls-files "*.go" | while read -r file; do current_copyright=$(grep -oE "^// Copyright [0-9]{4}(-[0-9]{4})? 
The NATS Authors" "$file" || echo "") [[ -z "$current_copyright" ]] && continue - # Get the last commit year for the file - last_year=$(git log --follow --format="%ad" --date=format:%Y -- "$file" | head -1) + # Get the last commit year for the file, ignore commit messages containing the word "copyright" + last_year=$(git log --follow --format="%ad" --date=format:%Y --grep="[Cc]opyright" --invert-grep -n 1 -- "$file") existing_years=$(echo "$current_copyright" | grep -oE "[0-9]{4}(-[0-9]{4})?") # Determine the new copyright range diff --git a/server/auth.go b/server/auth.go index 992afc528ad..ce917a06e85 100644 --- a/server/auth.go +++ b/server/auth.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/auth_callout.go b/server/auth_callout.go index 3801d9eaa52..cc9e8db8117 100644 --- a/server/auth_callout.go +++ b/server/auth_callout.go @@ -1,4 +1,4 @@ -// Copyright 2022-2024 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/auth_callout_test.go b/server/auth_callout_test.go index 2c56201e739..ad6af9f2256 100644 --- a/server/auth_callout_test.go +++ b/server/auth_callout_test.go @@ -1,4 +1,4 @@ -// Copyright 2022-2024 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/avl/norace_test.go b/server/avl/norace_test.go index 7c8f2274f7e..d5c609f5616 100644 --- a/server/avl/norace_test.go +++ b/server/avl/norace_test.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/avl/seqset.go b/server/avl/seqset.go index 96ff3767362..de281d03048 100644 --- a/server/avl/seqset.go +++ b/server/avl/seqset.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/certstore/certstore.go b/server/certstore/certstore.go index 42e228e8060..3c8114fdfad 100644 --- a/server/certstore/certstore.go +++ b/server/certstore/certstore.go @@ -1,4 +1,4 @@ -// Copyright 2022-2024 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/certstore/certstore_other.go b/server/certstore/certstore_other.go index 18d62f8f550..185efc64e13 100644 --- a/server/certstore/certstore_other.go +++ b/server/certstore/certstore_other.go @@ -1,4 +1,4 @@ -// Copyright 2022-2023 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/certstore/certstore_windows.go b/server/certstore/certstore_windows.go index 8b710a618bb..96eaea8e953 100644 --- a/server/certstore/certstore_windows.go +++ b/server/certstore/certstore_windows.go @@ -1,4 +1,4 @@ -// Copyright 2022-2024 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/certstore_windows_test.go b/server/certstore_windows_test.go index 27c72a1db62..6cd50f515cd 100644 --- a/server/certstore_windows_test.go +++ b/server/certstore_windows_test.go @@ -1,4 +1,4 @@ -// Copyright 2022-2024 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/ciphersuites.go b/server/ciphersuites.go index bc594c51f5d..740db5128cb 100644 --- a/server/ciphersuites.go +++ b/server/ciphersuites.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020 The NATS Authors +// Copyright 2016-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/client.go b/server/client.go index 006db041b29..be734777cbc 100644 --- a/server/client.go +++ b/server/client.go @@ -3494,10 +3494,18 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su // Check if we are a leafnode and have perms to check. 
if client.kind == LEAF && client.perms != nil { - if !client.pubAllowedFullCheck(string(subject), true, true) { + var subjectToCheck []byte + if subject[0] == '_' && bytes.HasPrefix(subject, []byte(gwReplyPrefix)) { + subjectToCheck = subject[gwSubjectOffset:] + } else if subject[0] == '$' && bytes.HasPrefix(subject, []byte(oldGWReplyPrefix)) { + subjectToCheck = subject[oldGWReplyStart:] + } else { + subjectToCheck = subject + } + if !client.pubAllowedFullCheck(string(subjectToCheck), true, true) { mt.addEgressEvent(client, sub, errMsgTracePubViolation) client.mu.Unlock() - client.Debugf("Not permitted to deliver to %q", subject) + client.Debugf("Not permitted to deliver to %q", subjectToCheck) return false } } diff --git a/server/closed_conns_test.go b/server/closed_conns_test.go index fd6fac2a6f4..c3afb1139af 100644 --- a/server/closed_conns_test.go +++ b/server/closed_conns_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2022 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/config_check_test.go b/server/config_check_test.go index c60fbdd1916..0db9eecba5f 100644 --- a/server/config_check_test.go +++ b/server/config_check_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2024 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/const.go b/server/const.go index 9753a62911d..ed9b53b7ef0 100644 --- a/server/const.go +++ b/server/const.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/consumer.go b/server/consumer.go index d101d54ca3e..ddc0f6b82d4 100644 --- a/server/consumer.go +++ b/server/consumer.go @@ -4479,6 +4479,7 @@ func (o *consumer) loopAndGatherMsgs(qch chan struct{}) { delay time.Duration sz int wrn, wrb int + wrNoWait bool ) o.mu.Lock() @@ -4557,7 +4558,7 @@ func (o *consumer) loopAndGatherMsgs(qch chan struct{}) { if o.isPushMode() { dsubj = o.dsubj } else if wr := o.nextWaiting(sz); wr != nil { - wrn, wrb = wr.n, wr.b + wrn, wrb, wrNoWait = wr.n, wr.b, wr.noWait dsubj = wr.reply if o.cfg.PriorityPolicy == PriorityPinnedClient { // FIXME(jrm): Can we make this prettier? @@ -4632,7 +4633,7 @@ func (o *consumer) loopAndGatherMsgs(qch chan struct{}) { } // Do actual delivery. - o.deliverMsg(dsubj, ackReply, pmsg, dc, rp) + o.deliverMsg(dsubj, ackReply, pmsg, dc, rp, wrNoWait) // If given request fulfilled batch size, but there are still pending bytes, send information about it. if wrn <= 0 && wrb > 0 { @@ -4831,7 +4832,7 @@ func convertToHeadersOnly(pmsg *jsPubMsg) { // Deliver a msg to the consumer. // Lock should be held and o.mset validated to be non-nil. -func (o *consumer) deliverMsg(dsubj, ackReply string, pmsg *jsPubMsg, dc uint64, rp RetentionPolicy) { +func (o *consumer) deliverMsg(dsubj, ackReply string, pmsg *jsPubMsg, dc uint64, rp RetentionPolicy, wrNoWait bool) { if o.mset == nil { pmsg.returnToPool() return @@ -4867,7 +4868,9 @@ func (o *consumer) deliverMsg(dsubj, ackReply string, pmsg *jsPubMsg, dc uint64, // If we're replicated we MUST only send the message AFTER we've got quorum for updating // delivered state. Otherwise, we could be in an invalid state after a leader change. // We can send immediately if not replicated, not using acks, or using flow control (incompatible). - if o.node == nil || ap == AckNone || o.cfg.FlowControl { + // TODO(mvv): If NoWait we also bypass replicating first. 
+ // Ideally we'd only send the NoWait request timeout after replication and delivery. + if o.node == nil || ap == AckNone || o.cfg.FlowControl || wrNoWait { o.outq.send(pmsg) } else { o.addReplicatedQueuedMsg(pmsg) diff --git a/server/core_benchmarks_test.go b/server/core_benchmarks_test.go index f84394e0c38..4a43b56b3c9 100644 --- a/server/core_benchmarks_test.go +++ b/server/core_benchmarks_test.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/dirstore.go b/server/dirstore.go index 9d229bc3d6a..6ef11bec7d3 100644 --- a/server/dirstore.go +++ b/server/dirstore.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/dirstore_test.go b/server/dirstore_test.go index ac8679294d1..a4a4794decc 100644 --- a/server/dirstore_test.go +++ b/server/dirstore_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2022 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/disk_avail.go b/server/disk_avail.go index 65e4ecb7899..b879330e850 100644 --- a/server/disk_avail.go +++ b/server/disk_avail.go @@ -1,4 +1,4 @@ -// Copyright 2020-2022 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/disk_avail_netbsd.go b/server/disk_avail_netbsd.go index 1ce39208687..dd81a1bad8a 100644 --- a/server/disk_avail_netbsd.go +++ b/server/disk_avail_netbsd.go @@ -1,4 +1,4 @@ -// Copyright 2022 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/disk_avail_openbsd.go b/server/disk_avail_openbsd.go index 6ed468fc38a..8ffdf12e01d 100644 --- a/server/disk_avail_openbsd.go +++ b/server/disk_avail_openbsd.go @@ -1,4 +1,4 @@ -// Copyright 2021 The NATS Authors +// Copyright 2021-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/disk_avail_wasm.go b/server/disk_avail_wasm.go index 47648834c6f..194687087de 100644 --- a/server/disk_avail_wasm.go +++ b/server/disk_avail_wasm.go @@ -1,4 +1,4 @@ -// Copyright 2022-2021 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/disk_avail_windows.go b/server/disk_avail_windows.go index 9c212437478..c79c506b5a3 100644 --- a/server/disk_avail_windows.go +++ b/server/disk_avail_windows.go @@ -1,4 +1,4 @@ -// Copyright 2020-2021 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/errors.go b/server/errors.go index 1bd4e8f7771..1a83abc4641 100644 --- a/server/errors.go +++ b/server/errors.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/errors_test.go b/server/errors_test.go index c547a5b62c8..fd62994471f 100644 --- a/server/errors_test.go +++ b/server/errors_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/filestore.go b/server/filestore.go index 392e71351d8..5ad6442ac3d 100644 --- a/server/filestore.go +++ b/server/filestore.go @@ -648,6 +648,13 @@ func (fs *fileStore) UpdateConfig(cfg *StreamConfig) error { return err } + // Create or delete the THW if needed. + if cfg.AllowMsgTTL && fs.ttls == nil { + fs.ttls = thw.NewHashWheel() + } else if !cfg.AllowMsgTTL && fs.ttls != nil { + fs.ttls = nil + } + // Limits checks and enforcement. 
fs.enforceMsgLimit() fs.enforceBytesLimit() @@ -666,7 +673,7 @@ func (fs *fileStore) UpdateConfig(cfg *StreamConfig) error { } fs.mu.Unlock() - if cfg.MaxAge != 0 { + if cfg.MaxAge != 0 || cfg.AllowMsgTTL { fs.expireMsgs() } return nil diff --git a/server/filestore_test.go b/server/filestore_test.go index 78ccf4861ab..22592e3f8e4 100644 --- a/server/filestore_test.go +++ b/server/filestore_test.go @@ -9607,3 +9607,23 @@ func TestFileStoreAccessTimeSpinUp(t *testing.T) { ngra := runtime.NumGoroutine() require_Equal(t, ngr, ngra) } + +func TestFileStoreUpdateConfigTTLState(t *testing.T) { + cfg := StreamConfig{ + Name: "zzz", + Subjects: []string{">"}, + Storage: FileStorage, + } + fs, err := newFileStore(FileStoreConfig{StoreDir: t.TempDir()}, cfg) + require_NoError(t, err) + defer fs.Stop() + require_Equal(t, fs.ttls, nil) + + cfg.AllowMsgTTL = true + require_NoError(t, fs.UpdateConfig(&cfg)) + require_NotEqual(t, fs.ttls, nil) + + cfg.AllowMsgTTL = false + require_NoError(t, fs.UpdateConfig(&cfg)) + require_Equal(t, fs.ttls, nil) +} diff --git a/server/fuzz.go b/server/fuzz.go index 361ab7c53e2..679d339e0a3 100644 --- a/server/fuzz.go +++ b/server/fuzz.go @@ -1,4 +1,4 @@ -// Copyright 2020-2022 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/gateway.go b/server/gateway.go index ca0e2474208..5f4d3582347 100644 --- a/server/gateway.go +++ b/server/gateway.go @@ -1,4 +1,4 @@ -// Copyright 2018-2024 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/gateway_test.go b/server/gateway_test.go index 3febf8d7a68..ae7aae1ad06 100644 --- a/server/gateway_test.go +++ b/server/gateway_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2024 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/ipqueue_test.go b/server/ipqueue_test.go index 8795ba705ff..3bda6877794 100644 --- a/server/ipqueue_test.go +++ b/server/ipqueue_test.go @@ -1,4 +1,4 @@ -// Copyright 2021-2024 The NATS Authors +// Copyright 2021-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/jetstream_cluster.go b/server/jetstream_cluster.go index 6c486391b1e..c0d8d83bddd 100644 --- a/server/jetstream_cluster.go +++ b/server/jetstream_cluster.go @@ -468,6 +468,7 @@ func (js *jetStream) isStreamHealthy(acc *Account, sa *streamAssignment) error { return errors.New("stream not found") } + msetNode := mset.raftNode() switch { case mset.cfg.Replicas <= 1: return nil // No further checks for R=1 streams @@ -475,7 +476,11 @@ func (js *jetStream) isStreamHealthy(acc *Account, sa *streamAssignment) error { case node == nil: return errors.New("group node missing") - case node != mset.raftNode(): + case msetNode == nil: + // Can happen when the stream's node is not yet initialized. 
+ return errors.New("stream node missing") + + case node != msetNode: s.Warnf("Detected stream cluster node skew '%s > %s'", acc.GetName(), streamName) node.Delete() mset.resetClusteredState(nil) @@ -521,6 +526,7 @@ func (js *jetStream) isConsumerHealthy(mset *stream, consumer string, ca *consum return errors.New("consumer not found") } + oNode := o.raftNode() rc, _ := o.replica() switch { case rc <= 1: @@ -529,7 +535,11 @@ func (js *jetStream) isConsumerHealthy(mset *stream, consumer string, ca *consum case node == nil: return errors.New("group node missing") - case node != o.raftNode(): + case oNode == nil: + // Can happen when the consumer's node is not yet initialized. + return errors.New("consumer node missing") + + case node != oNode: mset.mu.RLock() accName, streamName := mset.acc.GetName(), mset.cfg.Name mset.mu.RUnlock() @@ -2417,6 +2427,8 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps doSnapshot() return case <-mqch: + // Clean signal from shutdown routine so do best effort attempt to snapshot. + doSnapshot() return case <-qch: // Clean signal from shutdown routine so do best effort attempt to snapshot. @@ -8575,7 +8587,28 @@ RETRY: // Check for eof signaling. if len(msg) == 0 { msgsQ.recycle(&mrecs) - return nil + + // Sanity check that we've received all data expected by the snapshot. + mset.mu.RLock() + lseq := mset.lseq + mset.mu.RUnlock() + if lseq >= snap.LastSeq { + return nil + } + + // Make sure we do not spin and make things worse. 
+ const minRetryWait = 2 * time.Second + elapsed := time.Since(reqSendTime) + if elapsed < minRetryWait { + select { + case <-s.quitCh: + return ErrServerNotRunning + case <-qch: + return errCatchupStreamStopped + case <-time.After(minRetryWait - elapsed): + } + } + goto RETRY } if _, err := mset.processCatchupMsg(msg); err == nil { if mrec.reply != _EMPTY_ { @@ -9079,6 +9112,15 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { // In the latter case the request expects us to have more. Just continue and value availability here. // This should only be possible if the logs have already desynced, and we shouldn't have become leader // in the first place. Not much we can do here in this (hypothetical) scenario. + + // Do another quick sanity check that we actually have enough data to satisfy the request. + // If not, let's step down and hope a new leader can correct this. + if state.LastSeq < last { + s.Warnf("Catchup for stream '%s > %s' skipped, requested sequence %d was larger than current state: %+v", + mset.account(), mset.name(), seq, state) + node.StepDown() + return + } } mset.setCatchupPeer(sreq.Peer, last-seq) @@ -9198,7 +9240,7 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { // The snapshot has a larger last sequence then we have. This could be due to a truncation // when trying to recover after corruption, still not 100% sure. Could be off by 1 too somehow, // but tested a ton of those with no success. - s.Warnf("Catchup for stream '%s > %s' completed, but requested sequence %d was larger then current state: %+v", + s.Warnf("Catchup for stream '%s > %s' completed, but requested sequence %d was larger than current state: %+v", mset.account(), mset.name(), seq, state) // Try our best to redo our invalidated snapshot as well. 
if n := mset.raftNode(); n != nil { diff --git a/server/jetstream_cluster_1_test.go b/server/jetstream_cluster_1_test.go index 7cba7fa5afa..59a292fb73e 100644 --- a/server/jetstream_cluster_1_test.go +++ b/server/jetstream_cluster_1_test.go @@ -7313,6 +7313,80 @@ func TestJetStreamClusterStreamHealthCheckMustNotRecreate(t *testing.T) { checkNodeIsClosed(sa) } +func TestJetStreamClusterStreamHealthCheckMustNotDeleteEarly(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + waitForStreamAssignments := func() { + t.Helper() + checkFor(t, 5*time.Second, time.Second, func() error { + for _, s := range c.servers { + js := s.getJetStream() + js.mu.RLock() + sa := js.streamAssignment(globalAccountName, "TEST") + js.mu.RUnlock() + if sa == nil { + return fmt.Errorf("stream assignment not found on %s", s.Name()) + } + } + return nil + }) + } + getStreamAssignment := func(rs *Server) (*jetStream, *Account, *streamAssignment, *stream) { + acc, err := rs.lookupAccount(globalAccountName) + require_NoError(t, err) + mset, err := acc.lookupStream("TEST") + require_NotNil(t, err) + + sjs := rs.getJetStream() + sjs.mu.RLock() + defer sjs.mu.RUnlock() + + sas := sjs.cluster.streams[globalAccountName] + require_True(t, sas != nil) + sa := sas["TEST"] + require_True(t, sa != nil) + sa.Created = time.Time{} + return sjs, acc, sa, mset + } + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + }) + require_NoError(t, err) + waitForStreamAssignments() + + // We manually clear the node on the stream. 
+ rs := c.randomNonStreamLeader(globalAccountName, "TEST") + sjs, acc, sa, mset := getStreamAssignment(rs) + mset.mu.Lock() + mset.node = nil + mset.mu.Unlock() + sjs.mu.Lock() + group := sa.Group + if group == nil { + sjs.mu.Unlock() + t.Fatal("sa.Group not initialized") + } + node := group.node + if node == nil { + sjs.mu.Unlock() + t.Fatal("sa.Group.node not initialized") + } + sjs.mu.Unlock() + + // The health check gets the Raft node of the assignment and checks it against the + // Raft node of the stream. We simulate a race condition where the stream's Raft node + // is not yet initialized. The health check MUST NOT delete the node. + sjs.isStreamHealthy(acc, sa) + require_Equal(t, node.State(), Follower) +} + func TestJetStreamClusterConsumerHealthCheckMustNotRecreate(t *testing.T) { c := createJetStreamClusterExplicit(t, "R3S", 3) defer c.shutdown() @@ -7432,6 +7506,82 @@ func TestJetStreamClusterConsumerHealthCheckMustNotRecreate(t *testing.T) { checkNodeIsClosed(ca) } +func TestJetStreamClusterConsumerHealthCheckMustNotDeleteEarly(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + waitForConsumerAssignments := func() { + t.Helper() + checkFor(t, 5*time.Second, time.Second, func() error { + for _, s := range c.servers { + if s.getJetStream().consumerAssignment(globalAccountName, "TEST", "CONSUMER") == nil { + return fmt.Errorf("stream assignment not found on %s", s.Name()) + } + } + return nil + }) + } + getConsumerAssignment := func(rs *Server) (*jetStream, *consumerAssignment, *stream, *consumer) { + acc, err := rs.lookupAccount(globalAccountName) + require_NoError(t, err) + mset, err := acc.lookupStream("TEST") + require_NotNil(t, err) + o := mset.lookupConsumer("CONSUMER") + + sjs := rs.getJetStream() + sjs.mu.RLock() + defer sjs.mu.RUnlock() + + sas := sjs.cluster.streams[globalAccountName] + require_True(t, sas != nil) + sa := sas["TEST"] + 
require_True(t, sa != nil) + ca := sa.consumers["CONSUMER"] + require_True(t, ca != nil) + ca.Created = time.Time{} + return sjs, ca, mset, o + } + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + Retention: nats.InterestPolicy, // Replicated consumers by default + }) + require_NoError(t, err) + _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "CONSUMER"}) + require_NoError(t, err) + waitForConsumerAssignments() + + // We manually clear the node on the consumer. + rs := c.randomNonConsumerLeader(globalAccountName, "TEST", "CONSUMER") + sjs, ca, mset, o := getConsumerAssignment(rs) + o.mu.Lock() + o.node = nil + o.mu.Unlock() + sjs.mu.Lock() + group := ca.Group + if group == nil { + sjs.mu.Unlock() + t.Fatal("ca.Group not initialized") + } + node := group.node + if node == nil { + sjs.mu.Unlock() + t.Fatal("ca.Group.node not initialized") + } + sjs.mu.Unlock() + + // The health check gets the Raft node of the assignment and checks it against the + // Raft node of the consumer. We simulate a race condition where the consumer's Raft node + // is not yet initialized. The health check MUST NOT delete the node. 
+ sjs.isConsumerHealthy(mset, "CONSUMER", ca) + require_Equal(t, node.State(), Follower) +} + func TestJetStreamClusterRespectConsumerStartSeq(t *testing.T) { c := createJetStreamClusterExplicit(t, "R3S", 3) defer c.shutdown() @@ -8464,6 +8614,66 @@ func TestJetStreamClusterOfflineR1ConsumerDenyUpdate(t *testing.T) { require_Error(t, err, NewJSConsumerOfflineError()) } +func TestJetStreamClusterSnapshotStreamAssetOnShutdown(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + }) + require_NoError(t, err) + + var sds []string + for _, s := range c.servers { + sds = append(sds, s.StoreDir()) + } + + for _, sd := range sds { + matches, err := filepath.Glob(filepath.Join(sd, "$SYS", "_js_", "*", snapshotsDir, "*")) + require_NoError(t, err) + require_True(t, len(matches) > 0) + for _, match := range matches { + require_NoError(t, os.RemoveAll(match)) + } + } + + // Publish, so we have something new to snapshot. + _, err = js.Publish("foo", nil) + require_NoError(t, err) + checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + return checkState(t, c, globalAccountName, "TEST") + }) + + // Shutdown servers, and check if all made stream snapshots. + for _, s := range c.servers { + s.Shutdown() + } + for _, sd := range sds { + matches, err := filepath.Glob(filepath.Join(sd, "$SYS", "_js_", "*", snapshotsDir)) + require_NoError(t, err) + // Matches _meta_ and stream raft groups. 
+ require_Len(t, len(matches), 2) + var foundStream bool + for _, match := range matches { + if !strings.Contains(match, "S-R3F") { + continue + } + foundStream = true + dirs, err := os.ReadDir(match) + require_NoError(t, err) + if len(dirs) != 1 { + t.Errorf("Missing snapshot for %s", match) + } + } + require_True(t, foundStream) + } +} + // // DO NOT ADD NEW TESTS IN THIS FILE (unless to balance test times) // Add at the end of jetstream_cluster__test.go, with being the highest value. diff --git a/server/jetstream_cluster_2_test.go b/server/jetstream_cluster_2_test.go index bae044ce32a..2a04e46bcf1 100644 --- a/server/jetstream_cluster_2_test.go +++ b/server/jetstream_cluster_2_test.go @@ -6316,7 +6316,11 @@ func TestJetStreamClusterStreamResetOnExpirationDuringPeerDownAndRestartWithLead // Now clear raft WAL. mset, err := nsl.GlobalAccount().lookupStream("TEST") require_NoError(t, err) - require_NoError(t, mset.raftNode().InstallSnapshot(mset.stateSnapshot())) + // Snapshot could already be done during shutdown. If so, snapshotting again will not be available. + err = mset.raftNode().InstallSnapshot(mset.stateSnapshot()) + if err != nil { + require_Error(t, err, errNoSnapAvailable) + } nsl.Shutdown() nsl = c.restartServer(nsl) diff --git a/server/jetstream_cluster_4_test.go b/server/jetstream_cluster_4_test.go index e2d73d1d1a0..30c6c42a2c8 100644 --- a/server/jetstream_cluster_4_test.go +++ b/server/jetstream_cluster_4_test.go @@ -3528,8 +3528,118 @@ func TestJetStreamClusterConsumerDesyncAfterErrorDuringStreamCatchup(t *testing. c.waitOnConsumerLeader(globalAccountName, "TEST", "CONSUMER") // Outdated server must NOT become the leader. 
- newConsummerLeaderServer := c.consumerLeader(globalAccountName, "TEST", "CONSUMER") - require_Equal(t, newConsummerLeaderServer.Name(), clusterResetServerName) + newConsumerLeaderServer := c.consumerLeader(globalAccountName, "TEST", "CONSUMER") + require_Equal(t, newConsumerLeaderServer.Name(), clusterResetServerName) +} + +func TestJetStreamClusterDesyncAfterEofFromOldStreamLeader(t *testing.T) { + test := func(t *testing.T, eof bool) { + c := createJetStreamClusterExplicit(t, "R5S", 5) + defer c.shutdown() + + cs := c.randomServer() + nc, js := jsClientConnect(t, cs) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 5, + }) + require_NoError(t, err) + + sl := c.streamLeader(globalAccountName, "TEST") + var rs *Server + var catchup *Server + for _, s := range c.servers { + if s != sl && s != cs { + if rs == nil { + rs = s + } else { + catchup = s + break + } + } + } + + // Shutdown server that needs to catch up, so it gets a snapshot from the leader after restart. + catchup.Shutdown() + + // One message is received and applied by all replicas. + _, err = js.Publish("foo", nil) + require_NoError(t, err) + checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + return checkState(t, c, globalAccountName, "TEST") + }) + + // Disable Raft and start cluster subs for server, simulating an old leader with an outdated log. + acc, err := rs.lookupAccount(globalAccountName) + require_NoError(t, err) + mset, err := acc.lookupStream("TEST") + require_NoError(t, err) + mset.startClusterSubs() + rn := mset.raftNode() + rn.Stop() + rn.WaitForStop() + + // Temporarily disable cluster subs for this test. + // Normally due to multiple cluster subs responses will interleave, but this is simpler for this test. 
+ acc, err = sl.lookupAccount(globalAccountName) + require_NoError(t, err) + mset, err = acc.lookupStream("TEST") + require_NoError(t, err) + mset.stopClusterSubs() + + // Publish another message that the old leader will not get. + _, err = js.Publish("foo", nil) + require_NoError(t, err) + require_NoError(t, sl.JetStreamSnapshotStream(globalAccountName, "TEST")) + + sa := sl.getJetStream().streamAssignment(globalAccountName, "TEST") + require_NotNil(t, sa) + + // Send EOF immediately to requesting server, otherwise the server needs to time out and retry. + if eof { + snc, err := nats.Connect(rs.ClientURL(), nats.UserInfo("admin", "s3cr3t!")) + require_NoError(t, err) + defer snc.Close() + + sub, err := snc.Subscribe(sa.Sync, func(msg *nats.Msg) { + // EOF + rs.sendInternalMsgLocked(msg.Reply, _EMPTY_, nil, nil) + }) + require_NoError(t, err) + defer sub.Drain() + } + + // Restart server so it starts catching up. + catchup = c.restartServer(catchup) + + // Wait for server to start catching up. + // This shouldn't be a problem. The server should retry catchup, recognizing it wasn't caught up fully. + checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + if a, err := catchup.lookupAccount(globalAccountName); err != nil { + return err + } else if m, err := a.lookupStream("TEST"); err != nil { + return err + } else if !m.isCatchingUp() { + return errors.New("stream not catching up") + } + return nil + }) + + // Stop old leader, and re-enable cluster subs on proper leader. + rs.Shutdown() + mset.startClusterSubs() + + // Server should automatically restart catchup and get the missing data. 
+ checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { + return checkState(t, c, globalAccountName, "TEST") + }) + } + + t.Run("eof", func(t *testing.T) { test(t, true) }) + t.Run("retry", func(t *testing.T) { test(t, false) }) } func TestJetStreamClusterReservedResourcesAccountingAfterClusterReset(t *testing.T) { diff --git a/server/jetstream_consumer_test.go b/server/jetstream_consumer_test.go index fbdd21ec795..bbb2ba0ccee 100644 --- a/server/jetstream_consumer_test.go +++ b/server/jetstream_consumer_test.go @@ -9642,3 +9642,46 @@ func TestJetStreamConsumerStateAlwaysFromStore(t *testing.T) { require_Equal(t, ci.Delivered.Stream, 1) require_Equal(t, ci.AckFloor.Stream, 1) } + +func TestJetStreamConsumerPullNoWaitBatchLargerThanPending(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + }) + require_NoError(t, err) + + _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ + Durable: "C", + AckPolicy: nats.AckExplicitPolicy, + FilterSubject: "foo", + }) + require_NoError(t, err) + + req := JSApiConsumerGetNextRequest{Batch: 10, NoWait: true} + + for range 5 { + _, err := js.Publish("foo", []byte("OK")) + require_NoError(t, err) + } + + sub := sendRequest(t, nc, "rply", req) + defer sub.Unsubscribe() + + // Should get all 5 messages. + // TODO(mvv): Currently bypassing replicating first, need to figure out + // how to send NoWait's request timeout after replication. 
+ for range 5 { + msg, err := sub.NextMsg(time.Second) + require_NoError(t, err) + if len(msg.Data) == 0 && msg.Header != nil { + t.Fatalf("Expected data, got: %s", msg.Header.Get("Description")) + } + } +} diff --git a/server/jetstream_helpers_test.go b/server/jetstream_helpers_test.go index 5ebd7a1deb1..b5afe755063 100644 --- a/server/jetstream_helpers_test.go +++ b/server/jetstream_helpers_test.go @@ -1883,6 +1883,7 @@ type netProxy struct { down int url string surl string + port int } func newNetProxy(rtt time.Duration, upRate, downRate int, serverURL string) *netProxy { @@ -1895,14 +1896,27 @@ func createNetProxy(rtt time.Duration, upRate, downRate int, serverURL string, s if e != nil { panic(fmt.Sprintf("Error listening on port: %s, %q", hp, e)) } + u, err := url.Parse(serverURL) + if err != nil { + panic(fmt.Sprintf("Could not parse server URL: %v", err)) + } + + var clientURL string port := l.Addr().(*net.TCPAddr).Port + if u.User != nil { + clientURL = fmt.Sprintf("nats://%v@127.0.0.1:%d", u.User, port) + } else { + clientURL = fmt.Sprintf("nats://127.0.0.1:%d", port) + } + proxy := &netProxy{ listener: l, rtt: rtt, up: upRate, down: downRate, - url: fmt.Sprintf("nats://127.0.0.1:%d", port), + url: clientURL, surl: serverURL, + port: port, } if start { proxy.start() @@ -1917,6 +1931,16 @@ func (np *netProxy) start() { } host := u.Host + // Check if this is restart. 
+ // We nil out listener on stop() + if np.listener == nil && np.port != 0 { + hp := net.JoinHostPort("127.0.0.1", fmt.Sprintf("%d", np.port)) + np.listener, err = net.Listen("tcp", hp) + if err != nil { + panic(fmt.Sprintf("Error listening on port: %s, %q", hp, err)) + } + } + go func() { for { client, err := np.listener.Accept() @@ -1942,6 +1966,10 @@ func (np *netProxy) routeURL() string { return strings.Replace(np.url, "nats", "nats-route", 1) } +func (np *netProxy) leafURL() string { + return strings.Replace(np.url, "nats", "nats-leaf", 1) +} + func (np *netProxy) loop(tbw int, r, w net.Conn) { const rbl = 8192 var buf [rbl]byte diff --git a/server/jetstream_leafnode_test.go b/server/jetstream_leafnode_test.go index 8900d6136a2..0c796cbba63 100644 --- a/server/jetstream_leafnode_test.go +++ b/server/jetstream_leafnode_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -1426,3 +1426,249 @@ func TestJetStreamLeafNodeJSClusterMigrateRecoveryWithDelay(t *testing.T) { // long election timer. Now this should work reliably. lnc.waitOnStreamLeader(globalAccountName, "TEST") } + +// This will test that when a mirror or source construct is setup across a leafnode/domain +// that it will recover quickly once the LN is re-established regardless +// of backoff state of the internal consumer create. +func TestJetStreamLeafNodeAndMirrorResyncAfterConnectionDown(t *testing.T) { + tmplA := ` + listen: -1 + server_name: tcm + jetstream { + store_dir: '%s', + domain: TCM + } + accounts { + JS { users = [ { user: "y", pass: "p" } ]; jetstream: true } + $SYS { users = [ { user: "admin", pass: "s3cr3t!" 
} ] } + } + leaf { port: -1 } + ` + confA := createConfFile(t, []byte(fmt.Sprintf(tmplA, t.TempDir()))) + sA, oA := RunServerWithConfig(confA) + defer sA.Shutdown() + + // Create a proxy - we will use this to simulate a network down event. + rtt, bw := 10*time.Microsecond, 10*1024*1024*1024 + proxy := newNetProxy(rtt, bw, bw, fmt.Sprintf("nats://y:p@127.0.0.1:%d", oA.LeafNode.Port)) + defer proxy.stop() + + tmplB := ` + listen: -1 + server_name: xmm + jetstream { + store_dir: '%s', + domain: XMM + } + accounts { + JS { users = [ { user: "y", pass: "p" } ]; jetstream: true } + $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } + } + leaf { remotes [ { url: %s, account: "JS" } ], reconnect: "0.25s" } + ` + + confB := createConfFile(t, []byte(fmt.Sprintf(tmplB, t.TempDir(), proxy.leafURL()))) + sB, _ := RunServerWithConfig(confB) + defer sA.Shutdown() + + // Make sure we are connected ok. + checkLeafNodeConnectedCount(t, sA, 1) + checkLeafNodeConnectedCount(t, sB, 1) + + // We will have 3 streams that we will test for proper syncing after + // the network is restored. + // + // 1. Mirror A --> B + // 2. Mirror A <-- B + // 3. Source A <-> B + + // Connect to sA. + ncA, jsA := jsClientConnect(t, sA, nats.UserInfo("y", "p")) + defer ncA.Close() + + // Connect to sB. + ncB, jsB := jsClientConnect(t, sB, nats.UserInfo("y", "p")) + defer ncB.Close() + + // Add in TEST-A + _, err := jsA.AddStream(&nats.StreamConfig{Name: "TEST-A", Subjects: []string{"foo"}}) + require_NoError(t, err) + + // Add in TEST-B + _, err = jsB.AddStream(&nats.StreamConfig{Name: "TEST-B", Subjects: []string{"bar"}}) + require_NoError(t, err) + + // Now setup mirrors. 
+ _, err = jsB.AddStream(&nats.StreamConfig{ + Name: "M-A", + Mirror: &nats.StreamSource{ + Name: "TEST-A", + External: &nats.ExternalStream{APIPrefix: "$JS.TCM.API"}, + }, + }) + require_NoError(t, err) + + _, err = jsA.AddStream(&nats.StreamConfig{ + Name: "M-B", + Mirror: &nats.StreamSource{ + Name: "TEST-B", + External: &nats.ExternalStream{APIPrefix: "$JS.XMM.API"}, + }, + }) + require_NoError(t, err) + + // Now add in the streams that will source from one another bi-directionally. + _, err = jsA.AddStream(&nats.StreamConfig{ + Name: "SRC-A", + Subjects: []string{"A.*"}, + Sources: []*nats.StreamSource{{ + Name: "SRC-B", + FilterSubject: "B.*", + External: &nats.ExternalStream{APIPrefix: "$JS.XMM.API"}, + }}, + }) + require_NoError(t, err) + + _, err = jsB.AddStream(&nats.StreamConfig{ + Name: "SRC-B", + Subjects: []string{"B.*"}, + Sources: []*nats.StreamSource{{ + Name: "SRC-A", + FilterSubject: "A.*", + External: &nats.ExternalStream{APIPrefix: "$JS.TCM.API"}, + }}, + }) + require_NoError(t, err) + + // Now load them up with 500 messages. + initMsgs := 500 + for i := 0; i < initMsgs; i++ { + // Individual Streams + jsA.PublishAsync("foo", []byte("PAYLOAD")) + jsB.PublishAsync("bar", []byte("PAYLOAD")) + // Bi-directional Sources + jsA.PublishAsync("A.foo", []byte("PAYLOAD")) + jsB.PublishAsync("B.bar", []byte("PAYLOAD")) + } + select { + case <-jsA.PublishAsyncComplete(): + case <-time.After(5 * time.Second): + t.Fatalf("Did not receive completion signal") + } + select { + case <-jsB.PublishAsyncComplete(): + case <-time.After(5 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + // Utility to check the number of stream msgs. 
+ checkStreamMsgs := func(js nats.JetStreamContext, sname string, expected int, perr error) error { + t.Helper() + if perr != nil { + return perr + } + si, err := js.StreamInfo(sname) + require_NoError(t, err) + if si.State.Msgs != uint64(expected) { + return fmt.Errorf("Expected %d msgs for %s, got state: %+v", expected, sname, si.State) + } + return nil + } + + // Wait til we see all messages. + checkFor(t, 2*time.Second, 250*time.Millisecond, func() error { + err := checkStreamMsgs(jsA, "TEST-A", initMsgs, nil) + err = checkStreamMsgs(jsB, "M-A", initMsgs, err) + err = checkStreamMsgs(jsB, "TEST-B", initMsgs, err) + err = checkStreamMsgs(jsA, "M-B", initMsgs, err) + err = checkStreamMsgs(jsA, "SRC-A", initMsgs*2, err) + err = checkStreamMsgs(jsB, "SRC-B", initMsgs*2, err) + return err + }) + + // Take down proxy. This will stop any propagation of messages between TEST and M streams. + proxy.stop() + + // Now add an additional 500 messages to originals on both sides. + for i := 0; i < initMsgs; i++ { + // Individual Streams + jsA.PublishAsync("foo", []byte("PAYLOAD")) + jsB.PublishAsync("bar", []byte("PAYLOAD")) + // Bi-directional Sources + jsA.PublishAsync("A.foo", []byte("PAYLOAD")) + jsB.PublishAsync("B.bar", []byte("PAYLOAD")) + } + select { + case <-jsA.PublishAsyncComplete(): + case <-time.After(5 * time.Second): + t.Fatalf("Did not receive completion signal") + } + select { + case <-jsB.PublishAsyncComplete(): + case <-time.After(5 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + cancelAndDelayConsumer := func(s *Server, stream string) { + // Now make sure internal consumer is at max backoff. + acc, err := s.lookupAccount("JS") + require_NoError(t, err) + mset, err := acc.lookupStream(stream) + require_NoError(t, err) + + // Reset sourceInfo to have lots of failures and last attempt 2 minutes ago. + // Lock should be held on parent stream. 
+ resetSourceInfo := func(si *sourceInfo) { + si.sip = false + si.fails = 100 + si.lreq = time.Now().Add(-2 * time.Minute) + } + + // Force the consumer to be canceled and we simulate 100 failed attempts + // such that the next time we will try will be a long way out. + mset.mu.Lock() + if mset.mirror != nil { + resetSourceInfo(mset.mirror) + mset.cancelSourceInfo(mset.mirror) + mset.scheduleSetupMirrorConsumerRetry() + } else if len(mset.sources) > 0 { + for iname, si := range mset.sources { + resetSourceInfo(si) + mset.cancelSourceInfo(si) + mset.setupSourceConsumer(iname, si.sseq+1, time.Time{}) + } + } + mset.mu.Unlock() + } + + // Mirrors + cancelAndDelayConsumer(sA, "M-B") + cancelAndDelayConsumer(sB, "M-A") + // Now bi-directional sourcing + cancelAndDelayConsumer(sA, "SRC-A") + cancelAndDelayConsumer(sB, "SRC-B") + + // Now restart the network proxy. + proxy.start() + + // Make sure we are connected ok. + checkLeafNodeConnectedCount(t, sA, 1) + checkLeafNodeConnectedCount(t, sB, 1) + + // These should be good before re-sync. + require_NoError(t, checkStreamMsgs(jsA, "TEST-A", initMsgs*2, nil)) + require_NoError(t, checkStreamMsgs(jsB, "TEST-B", initMsgs*2, nil)) + + start := time.Now() + // Wait til we see all messages. 
+ checkFor(t, 2*time.Minute, 50*time.Millisecond, func() error { + err := checkStreamMsgs(jsA, "M-B", initMsgs*2, err) + err = checkStreamMsgs(jsB, "M-A", initMsgs*2, err) + err = checkStreamMsgs(jsA, "SRC-A", initMsgs*4, err) + err = checkStreamMsgs(jsB, "SRC-B", initMsgs*4, err) + return err + }) + if elapsed := time.Since(start); elapsed > 2*time.Second { + t.Fatalf("Expected to resync all streams <2s but got %v", elapsed) + } +} diff --git a/server/jetstream_meta_benchmark_test.go b/server/jetstream_meta_benchmark_test.go index 33869d5327e..24ce2b0b1eb 100644 --- a/server/jetstream_meta_benchmark_test.go +++ b/server/jetstream_meta_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright 2024 The NATS Authors +// Copyright 2024-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/jetstream_super_cluster_test.go b/server/jetstream_super_cluster_test.go index 26cff311014..642700efe00 100644 --- a/server/jetstream_super_cluster_test.go +++ b/server/jetstream_super_cluster_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/jetstream_test.go b/server/jetstream_test.go index 8bb9b227ddd..f00f47d3a6d 100644 --- a/server/jetstream_test.go +++ b/server/jetstream_test.go @@ -9511,6 +9511,57 @@ func TestJetStreamMirrorBasics(t *testing.T) { } +func TestJetStreamMirrorStripExpectedHeaders(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + // Client for API requests. + nc, js := jsClientConnect(t, s) + defer nc.Close() + + // Create source and mirror streams. 
+ _, err := js.AddStream(&nats.StreamConfig{ + Name: "S", + Subjects: []string{"foo"}, + }) + require_NoError(t, err) + _, err = js.AddStream(&nats.StreamConfig{ + Name: "M", + Mirror: &nats.StreamSource{Name: "S"}, + }) + require_NoError(t, err) + + m := nats.NewMsg("foo") + pubAck, err := js.PublishMsg(m) + require_NoError(t, err) + require_Equal(t, pubAck.Sequence, 1) + + // Mirror should get message. + checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + if si, err := js.StreamInfo("M"); err != nil { + return err + } else if si.State.Msgs != 1 { + return fmt.Errorf("expected 1 mirrored msg, got %d", si.State.Msgs) + } + return nil + }) + + m.Header.Set("Nats-Expected-Stream", "S") + pubAck, err = js.PublishMsg(m) + require_NoError(t, err) + require_Equal(t, pubAck.Sequence, 2) + + // Mirror should strip expected headers and store the message. + checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + if si, err := js.StreamInfo("M"); err != nil { + return err + } else if si.State.Msgs != 2 { + return fmt.Errorf("expected 2 mirrored msgs, got %d", si.State.Msgs) + } + return nil + }) +} + func TestJetStreamMirrorUpdatePreventsSubjects(t *testing.T) { s := RunBasicJetStreamServer(t) defer s.Shutdown() diff --git a/server/jwt.go b/server/jwt.go index 1e30f82f4c3..e8da5213cc5 100644 --- a/server/jwt.go +++ b/server/jwt.go @@ -1,4 +1,4 @@ -// Copyright 2018-2024 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/jwt_test.go b/server/jwt_test.go index 4a00495fbb1..331e3117769 100644 --- a/server/jwt_test.go +++ b/server/jwt_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2024 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/leafnode.go b/server/leafnode.go index 4904aee2f21..f49544812f3 100644 --- a/server/leafnode.go +++ b/server/leafnode.go @@ -1418,7 +1418,7 @@ func (c *client) processLeafnodeInfo(info *Info) { c.setPermissions(perms) } - var resumeConnect bool + var resumeConnect, checkSyncConsumers bool // If this is a remote connection and this is the first INFO protocol, // then we need to finish the connect process by sending CONNECT, etc.. @@ -1428,6 +1428,7 @@ func (c *client) processLeafnodeInfo(info *Info) { resumeConnect = true } else if !firstINFO && didSolicit { c.leaf.remoteAccName = info.RemoteAccount + checkSyncConsumers = info.JetStream } // Check if we have the remote account information and if so make sure it's stored. @@ -1446,6 +1447,12 @@ func (c *client) processLeafnodeInfo(info *Info) { if finishConnect { s.leafNodeFinishConnectProcess(c) } + + // If we have JS enabled and so does the other side, we will + // check to see if we need to kick any internal source or mirror consumers. + if checkSyncConsumers { + s.checkInternalSyncConsumers(c.acc, info.Domain) + } } func (s *Server) negotiateLeafCompression(c *client, didSolicit bool, infoCompression string, co *CompressionOpts) (bool, error) { @@ -1954,11 +1961,12 @@ func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) erro // If we received pub deny permissions from the other end, merge with existing ones. 
c.mergeDenyPermissions(pub, proto.DenyPub) + acc := c.acc c.mu.Unlock() // Register the cluster, even if empty, as long as we are acting as a hub. if !proto.Hub { - c.acc.registerLeafNodeCluster(proto.Cluster) + acc.registerLeafNodeCluster(proto.Cluster) } // Add in the leafnode here since we passed through auth at this point. @@ -1973,12 +1981,58 @@ func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) erro s.initLeafNodeSmapAndSendSubs(c) // Announce the account connect event for a leaf node. - // This will no-op as needed. + // This will be a no-op as needed. s.sendLeafNodeConnect(c.acc) + // If we have JS enabled and so does the other side, we will + // check to see if we need to kick any internal source or mirror consumers. + if proto.JetStream { + s.checkInternalSyncConsumers(acc, proto.Domain) + } return nil } +// checkInternalSyncConsumers +func (s *Server) checkInternalSyncConsumers(acc *Account, remoteDomain string) { + // Grab our js + js := s.getJetStream() + + // Only applicable if we have JS and the leafnode has JS as well. + // We check for remote JS outside. + if !js.isEnabled() || acc == nil { + return + } + + // We will check all streams in our local account. They must be a leader and + // be sourcing or mirroring. We will check the external config on the stream itself + // if this is cross domain, or if the remote domain is empty, meaning we might be + // extedning the system across this leafnode connection and hence we would be extending + // our own domain. + jsa := js.lookupAccount(acc) + if jsa == nil { + return + } + var streams []*stream + jsa.mu.RLock() + for _, mset := range jsa.streams { + mset.cfgMu.RLock() + // We need to have a mirror or source defined. + // We do not want to force another lock here to look for leader status, + // so collect and after we release jsa will make sure. 
+ if mset.cfg.Mirror != nil || len(mset.cfg.Sources) > 0 { + streams = append(streams, mset) + } + mset.cfgMu.RUnlock() + } + jsa.mu.RUnlock() + + // Now loop through all candidates and check if we are the leader and have NOT + // created the sync up consumer. + for _, mset := range streams { + mset.retryDisconnectedSyncConsumers(remoteDomain) + } +} + // Returns the remote cluster name. This is set only once so does not require a lock. func (c *client) remoteCluster() string { if c.leaf == nil { diff --git a/server/leafnode_test.go b/server/leafnode_test.go index f00a46d8849..d5ebc462081 100644 --- a/server/leafnode_test.go +++ b/server/leafnode_test.go @@ -9975,3 +9975,115 @@ func TestLeafNodePermissionWithLiteralSubjectAndQueueInterest(t *testing.T) { }) require_Equal(t, "OK", string(resp.Data)) } + +func TestLeafNodePermissionWithGateways(t *testing.T) { + usConf := createConfFile(t, []byte(` + server_name: "US" + listen: "127.0.0.1:-1" + gateway { + name: "US" + listen: "127.0.0.1:-1" + } + accounts { + sys { users: [{user: sys, password: sys}] } + leaf { users: [{user: leaf, password: leaf}] } + } + system_account: sys + `)) + us, ous := RunServerWithConfig(usConf) + defer us.Shutdown() + + euConf := createConfFile(t, fmt.Appendf(nil, ` + server_name: "EU" + listen: "127.0.0.1:-1" + gateway { + name: "EU" + listen: "127.0.0.1:-1" + gateways: [ + { + name: "US" + urls: ["nats://127.0.0.1:%d"] + } + ] + } + leafnodes { + listen: "127.0.0.1:-1" + } + accounts { + sys { users: [{user: sys, password: sys}] } + leaf { + users: [ + { + user: leaf + password: leaf + permissions: { + publish: "bar" + subscribe: "foo" + } + } + ] + } + } + system_account: sys + `, ous.Gateway.Port)) + eu, oeu := RunServerWithConfig(euConf) + defer eu.Shutdown() + + waitForOutboundGateways(t, us, 1, 2*time.Second) + waitForOutboundGateways(t, eu, 1, 2*time.Second) + waitForInboundGateways(t, us, 1, 2*time.Second) + waitForInboundGateways(t, eu, 1, 2*time.Second) + + leafConf := 
createConfFile(t, fmt.Appendf(nil, ` + server_name: "LEAF" + listen: "127.0.0.1:-1" + leafnodes { + remotes: [ + { url: "nats://leaf:leaf@127.0.0.1:%d" } + ] + } + `, oeu.LeafNode.Port)) + leaf, _ := RunServerWithConfig(leafConf) + defer leaf.Shutdown() + + checkLeafNodeConnected(t, leaf) + + // Run the service from EU leafnode. + ncEU := natsConnect(t, leaf.ClientURL()) + defer ncEU.Close() + natsSub(t, ncEU, "foo", func(m *nats.Msg) { + m.Respond([]byte("response")) + }) + natsFlush(t, ncEU) + + // Wait for subject interest to propagate. + checkGWInterestOnlyModeInterestOn(t, us, "EU", "leaf", "foo") + + // Connect to the US server + ncUS := natsConnect(t, us.ClientURL(), nats.UserInfo("leaf", "leaf")) + defer ncUS.Close() + + // Create a subscription on "bar" and send request on "foo" + sub := natsSubSync(t, ncUS, "bar") + // Wait for subject to propagate so we know that EU server + // would know about the "bar" subscription. + checkGWInterestOnlyModeInterestOn(t, eu, "US", "leaf", "bar") + // Send the request and make sure we receive the reply. + natsPubReq(t, ncUS, "foo", "bar", []byte("request")) + reply := natsNexMsg(t, sub, time.Second) + if string(reply.Data) != "response" { + t.Fatalf("Invalid response: %q", reply.Data) + } + // Make sure that we don't blindly accept any reply because there + // is the routing protocol. So create a sub on "baz" that the leaf + // would not be allowed to reply to. We should not get the reply + // to our request. + sub2 := natsSubSync(t, ncUS, "baz") + checkGWInterestOnlyModeInterestOn(t, eu, "US", "leaf", "baz") + // Send the request. We should not get the message since the + // leaf server does not have permission to publish on "baz". 
+ natsPubReq(t, ncUS, "foo", "baz", []byte("request2")) + if msg, err := sub2.NextMsg(250 * time.Millisecond); err == nil { + t.Fatalf("Should not have received the reply, got %q", msg.Data) + } +} diff --git a/server/log.go b/server/log.go index 9a4b7ed4bb0..9baa31dc654 100644 --- a/server/log.go +++ b/server/log.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/log_test.go b/server/log_test.go index 55220489a30..41b5bb00f38 100644 --- a/server/log_test.go +++ b/server/log_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/memstore.go b/server/memstore.go index 4da9adff467..581edb5e562 100644 --- a/server/memstore.go +++ b/server/memstore.go @@ -83,6 +83,12 @@ func (ms *memStore) UpdateConfig(cfg *StreamConfig) error { ms.mu.Lock() ms.cfg = *cfg + // Create or delete the THW if needed. + if cfg.AllowMsgTTL && ms.ttls == nil { + ms.ttls = thw.NewHashWheel() + } else if !cfg.AllowMsgTTL && ms.ttls != nil { + ms.ttls = nil + } // Limits checks and enforcement. 
ms.enforceMsgLimit() ms.enforceBytesLimit() @@ -112,7 +118,7 @@ func (ms *memStore) UpdateConfig(cfg *StreamConfig) error { } ms.mu.Unlock() - if cfg.MaxAge != 0 { + if cfg.MaxAge != 0 || cfg.AllowMsgTTL { ms.expireMsgs() } return nil diff --git a/server/memstore_test.go b/server/memstore_test.go index 77ab3a2c22c..2f95ee3eb8b 100644 --- a/server/memstore_test.go +++ b/server/memstore_test.go @@ -1292,6 +1292,26 @@ func TestMemStoreAllLastSeqs(t *testing.T) { require_True(t, reflect.DeepEqual(seqs, expected)) } +func TestMemStoreUpdateConfigTTLState(t *testing.T) { + cfg := &StreamConfig{ + Name: "zzz", + Subjects: []string{">"}, + Storage: MemoryStorage, + } + ms, err := newMemStore(cfg) + require_NoError(t, err) + defer ms.Stop() + require_Equal(t, ms.ttls, nil) + + cfg.AllowMsgTTL = true + require_NoError(t, ms.UpdateConfig(cfg)) + require_NotEqual(t, ms.ttls, nil) + + cfg.AllowMsgTTL = false + require_NoError(t, ms.UpdateConfig(cfg)) + require_Equal(t, ms.ttls, nil) +} + /////////////////////////////////////////////////////////////////////////// // Benchmarks /////////////////////////////////////////////////////////////////////////// diff --git a/server/monitor.go b/server/monitor.go index c6b16b2142b..cf0bbecf5eb 100644 --- a/server/monitor.go +++ b/server/monitor.go @@ -23,6 +23,7 @@ import ( "encoding/json" "expvar" "fmt" + "maps" "math" "net" "net/http" @@ -282,7 +283,9 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { s.mu.RLock() // Default to all client unless filled in above. 
if clist == nil { - clist = s.clients + clist = make(map[uint64]*client, len(s.clients)+len(s.leafs)) + maps.Copy(clist, s.clients) + maps.Copy(clist, s.leafs) } // copy the server id for monitoring diff --git a/server/monitor_sort_opts.go b/server/monitor_sort_opts.go index 6ab1095b2e0..3a2a0b667ab 100644 --- a/server/monitor_sort_opts.go +++ b/server/monitor_sort_opts.go @@ -1,4 +1,4 @@ -// Copyright 2013-2023 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/monitor_test.go b/server/monitor_test.go index f3500f3f38e..88aa2016b11 100644 --- a/server/monitor_test.go +++ b/server/monitor_test.go @@ -27,6 +27,7 @@ import ( "path/filepath" "reflect" "runtime" + "slices" "sort" "strings" "sync" @@ -5700,8 +5701,11 @@ func TestMonitorConnzOperatorAccountNames(t *testing.T) { for pollMode := 0; pollMode < 2; pollMode++ { url := fmt.Sprintf("http://127.0.0.1:%d/connz?auth=1", s.MonitorAddr().Port) connz := pollConnz(t, s, pollMode, url, &ConnzOptions{Username: true}) - require_Equal(t, connz.NumConns, 1) - ci := connz.Conns[0] + require_Equal(t, connz.NumConns, 2) + idx := slices.IndexFunc(connz.Conns, func(c *ConnInfo) bool { + return c.Kind == kindStringMap[CLIENT] + }) + ci := connz.Conns[idx] require_Equal(t, ci.Account, accPub) require_Equal(t, ci.NameTag, accName) } @@ -5822,6 +5826,88 @@ func TestMonitorConnzSortByRTT(t *testing.T) { } } +func TestMonitorConnzIncludesLeafnodes(t *testing.T) { + content := ` + server_name: "hub" + listen: "127.0.0.1:-1" + http: "127.0.0.1:-1" + operator = "../test/configs/nkeys/op.jwt" + resolver = MEMORY + ping_interval = 1 + leafnodes { + listen: "127.0.0.1:-1" + } + ` + conf := createConfFile(t, []byte(content)) + sb, ob := RunServerWithConfig(conf) + defer sb.Shutdown() + + createAcc := func(t *testing.T) (*Account, string) { 
+ t.Helper() + acc, akp := createAccount(sb) + kp, _ := nkeys.CreateUser() + pub, _ := kp.PublicKey() + nuc := jwt.NewUserClaims(pub) + ujwt, err := nuc.Encode(akp) + if err != nil { + t.Fatalf("Error generating user JWT: %v", err) + } + seed, _ := kp.Seed() + creds := genCredsFile(t, ujwt, seed) + return acc, creds + } + acc, mycreds := createAcc(t) + leafName := "my-leaf-node" + + content = ` + port: -1 + http: "127.0.0.1:-1" + ping_interval = 1 + server_name: %s + accounts { + %s { + users [ + {user: user1, password: pwd} + ] + } + } + leafnodes { + remotes = [ + { + account: "%s" + url: nats-leaf://127.0.0.1:%d + credentials: '%s' + } + ] + } + ` + config := fmt.Sprintf(content, + leafName, + acc.Name, + acc.Name, ob.LeafNode.Port, mycreds) + conf = createConfFile(t, []byte(config)) + sa, _ := RunServerWithConfig(conf) + defer sa.Shutdown() + + checkFor(t, time.Second, 15*time.Millisecond, func() error { + if n := sa.NumLeafNodes(); n != 1 { + return fmt.Errorf("Expected 1 leaf connection, got %v", n) + } + return nil + }) + + for test, options := range map[string]*ConnzOptions{ + "WithoutAccount": {}, + "WithAccount": {Account: acc.Name}, + } { + t.Run(test, func(t *testing.T) { + c := pollConnz(t, sb, 1, "http://127.0.0.1:%d/connz", options) + require_Equal(t, c.NumConns, 1) + require_Equal(t, c.Conns[0].Kind, kindStringMap[LEAF]) + }) + } +} + // https://github.com/nats-io/nats-server/issues/4144 func TestMonitorAccountszMappingOrderReporting(t *testing.T) { conf := createConfFile(t, []byte(` diff --git a/server/mqtt.go b/server/mqtt.go index 331e688c87a..6d5a49807fb 100644 --- a/server/mqtt.go +++ b/server/mqtt.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/mqtt_ex_bench_test.go b/server/mqtt_ex_bench_test.go index 2e7ce7def2a..2d14c3f78c8 100644 --- a/server/mqtt_ex_bench_test.go +++ b/server/mqtt_ex_bench_test.go @@ -1,4 +1,4 @@ -// Copyright 2024 The NATS Authors +// Copyright 2024-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/mqtt_ex_test_test.go b/server/mqtt_ex_test_test.go index 4416453287b..c5880f38096 100644 --- a/server/mqtt_ex_test_test.go +++ b/server/mqtt_ex_test_test.go @@ -1,4 +1,4 @@ -// Copyright 2024 The NATS Authors +// Copyright 2024-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/mqtt_test.go b/server/mqtt_test.go index 349184f7f8c..e779096ea07 100644 --- a/server/mqtt_test.go +++ b/server/mqtt_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/msgtrace.go b/server/msgtrace.go index e3e73421b19..3f995f069ce 100644 --- a/server/msgtrace.go +++ b/server/msgtrace.go @@ -1,4 +1,4 @@ -// Copyright 2024 The NATS Authors +// Copyright 2024-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/nkey.go b/server/nkey.go index 0e5d0ee08bf..f0579177cf0 100644 --- a/server/nkey.go +++ b/server/nkey.go @@ -1,4 +1,4 @@ -// Copyright 2018-2023 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/norace_1_test.go b/server/norace_1_test.go index b3612660c52..8882d1881a9 100644 --- a/server/norace_1_test.go +++ b/server/norace_1_test.go @@ -4998,7 +4998,7 @@ func TestNoRaceJetStreamAccountLimitsAndRestart(t *testing.T) { c.waitOnLeader() c.waitOnStreamLeader("$JS", "TEST") - checkFor(t, 2*time.Second, 500*time.Millisecond, func() error { + checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { return checkState(t, c, "$JS", "TEST") }) for _, cs := range c.servers { diff --git a/server/norace_2_test.go b/server/norace_2_test.go index c0d0f46e733..9dd90934ba0 100644 --- a/server/norace_2_test.go +++ b/server/norace_2_test.go @@ -1235,6 +1235,15 @@ func TestNoRaceJetStreamKVReplaceWithServerRestart(t *testing.T) { }) require_NoError(t, err) + // Manually disable direct get on underlying stream, since this test + // relies on immediate consistency which we can't guarantee with direct gets + // until we provide a solution for this. + si, err := js.StreamInfo("KV_TEST") + require_NoError(t, err) + si.Config.AllowDirect = false + _, err = js.UpdateStream(&si.Config) + require_NoError(t, err) + createData := func(n int) []byte { const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" b := make([]byte, n) @@ -1247,6 +1256,11 @@ func TestNoRaceJetStreamKVReplaceWithServerRestart(t *testing.T) { _, err = kv.Create("foo", createData(160)) require_NoError(t, err) + // Ensure all replicas have applied the key. 
+ checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + return checkState(t, c, globalAccountName, "KV_TEST") + }) + ch := make(chan struct{}) wg := sync.WaitGroup{} @@ -1263,6 +1277,7 @@ func TestNoRaceJetStreamKVReplaceWithServerRestart(t *testing.T) { for { select { case <-ch: + close(errCh) return default: k, err := kv.Get("foo") @@ -1285,7 +1300,7 @@ func TestNoRaceJetStreamKVReplaceWithServerRestart(t *testing.T) { time.Sleep(2 * time.Second) for _, s := range c.servers { s.Shutdown() - // Need to leave servers down for awhile to trigger bug properly. + // Need to leave servers down for a while to trigger bug properly. time.Sleep(5 * time.Second) s = c.restartServer(s) c.waitOnServerHealthz(s) diff --git a/server/ocsp.go b/server/ocsp.go index cf3b1504148..0239c65956a 100644 --- a/server/ocsp.go +++ b/server/ocsp.go @@ -1,4 +1,4 @@ -// Copyright 2021-2024 The NATS Authors +// Copyright 2021-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/ocsp_peer.go b/server/ocsp_peer.go index fd735094879..22d42ef6654 100644 --- a/server/ocsp_peer.go +++ b/server/ocsp_peer.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/opts_test.go b/server/opts_test.go index bb6157fbd63..273936964a1 100644 --- a/server/opts_test.go +++ b/server/opts_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/parser.go b/server/parser.go index 3ba608d6c3b..58d034a6b9f 100644 --- a/server/parser.go +++ b/server/parser.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/ping_test.go b/server/ping_test.go index 2c6fd883cdc..482c4d30fc3 100644 --- a/server/ping_test.go +++ b/server/ping_test.go @@ -1,4 +1,4 @@ -// Copyright 2015-2020 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/pse/pse_freebsd_cgo.go b/server/pse/pse_freebsd_cgo.go index 745a20dbc9f..e759a482981 100644 --- a/server/pse/pse_freebsd_cgo.go +++ b/server/pse/pse_freebsd_cgo.go @@ -1,4 +1,4 @@ -// Copyright 2015-2021 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/pse/pse_linux.go b/server/pse/pse_linux.go index 5414b327d61..aa26532e962 100644 --- a/server/pse/pse_linux.go +++ b/server/pse/pse_linux.go @@ -1,4 +1,4 @@ -// Copyright 2015-2022 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/pse/pse_rumprun.go b/server/pse/pse_rumprun.go index d16e6ea95bd..94ddf8ff9e7 100644 --- a/server/pse/pse_rumprun.go +++ b/server/pse/pse_rumprun.go @@ -1,4 +1,4 @@ -// Copyright 2015-2021 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/pse/pse_test.go b/server/pse/pse_test.go index 890f9645bc2..35bec2d2568 100644 --- a/server/pse/pse_test.go +++ b/server/pse/pse_test.go @@ -1,4 +1,4 @@ -// Copyright 2015-2018 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -18,6 +18,7 @@ import ( "os" "os/exec" "runtime" + "runtime/debug" "testing" ) @@ -41,7 +42,7 @@ func TestPSEmulation(t *testing.T) { psRss *= 1024 // 1k blocks, want bytes. psVss *= 1024 // 1k blocks, want bytes. - runtime.GC() + debug.FreeOSMemory() // Our internal version ProcUsage(&pcpu, &rss, &vss) diff --git a/server/pse/pse_wasm.go b/server/pse/pse_wasm.go index e3db060c804..6fcfda7377a 100644 --- a/server/pse/pse_wasm.go +++ b/server/pse/pse_wasm.go @@ -1,4 +1,4 @@ -// Copyright 2022 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/pse/pse_windows.go b/server/pse/pse_windows.go index 09f84a0ccb3..ab0addee93f 100644 --- a/server/pse/pse_windows.go +++ b/server/pse/pse_windows.go @@ -1,4 +1,4 @@ -// Copyright 2015-2024 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/pse/pse_windows_test.go b/server/pse/pse_windows_test.go index 1ba3a6cbbe0..ba395351f27 100644 --- a/server/pse/pse_windows_test.go +++ b/server/pse/pse_windows_test.go @@ -1,4 +1,4 @@ -// Copyright 2015-2021 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/pse/pse_zos.go b/server/pse/pse_zos.go index df469f4e1e4..8596c497533 100644 --- a/server/pse/pse_zos.go +++ b/server/pse/pse_zos.go @@ -1,4 +1,4 @@ -// Copyright 2023 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/raft.go b/server/raft.go index 370de631eb6..1111ab277e1 100644 --- a/server/raft.go +++ b/server/raft.go @@ -188,7 +188,7 @@ type raft struct { isSysAcc atomic.Bool // Are we utilizing the system account? maybeLeader bool // The group had a preferred leader. And is maybe already acting as leader prior to scale up. - observer bool // The node is observing, i.e. not participating in voting + observer bool // The node is observing, i.e. 
not able to become leader extSt extensionState // Extension state @@ -2212,12 +2212,13 @@ var aePool = sync.Pool{ // appendEntry is the main struct that is used to sync raft peers. type appendEntry struct { leader string // The leader that this append entry came from. - term uint64 // The current term, as the leader understands it. - commit uint64 // The commit index, as the leader understands it. + term uint64 // The term when this entry was stored. + commit uint64 // The commit index of the leader when this append entry was sent. pterm uint64 // The previous term, for checking consistency. pindex uint64 // The previous commit index, for checking consistency. entries []*Entry // Entries to process. // Below fields are for internal use only: + lterm uint64 // The highest term for catchups only, as the leader understands it. (If lterm=0, use term instead) reply string // Reply subject to respond to once committed. sub *subscription // The subscription that the append entry came in on. buf []byte @@ -2227,7 +2228,7 @@ type appendEntry struct { func newAppendEntry(leader string, term, commit, pterm, pindex uint64, entries []*Entry) *appendEntry { ae := aePool.Get().(*appendEntry) ae.leader, ae.term, ae.commit, ae.pterm, ae.pindex, ae.entries = leader, term, commit, pterm, pindex, entries - ae.reply, ae.sub, ae.buf = _EMPTY_, nil, nil + ae.lterm, ae.reply, ae.sub, ae.buf = 0, _EMPTY_, nil, nil return ae } @@ -2311,34 +2312,40 @@ func (ae *appendEntry) encode(b []byte) ([]byte, error) { var elen int for _, e := range ae.entries { + if len(e.Data) > math.MaxUint32 { + return nil, errBadAppendEntry + } elen += len(e.Data) + 1 + 4 // 1 is type, 4 is for size. } - tlen := appendEntryBaseLen + elen + 1 + // Uvarint for lterm can be a maximum 10 bytes for a uint64. 
+ var _lterm [10]byte + lterm := _lterm[:binary.PutUvarint(_lterm[:], ae.lterm)] + tlen := appendEntryBaseLen + elen + len(lterm) var buf []byte if cap(b) >= tlen { - buf = b[:tlen] + buf = b[:idLen] } else { - buf = make([]byte, tlen) + buf = make([]byte, idLen, tlen) } var le = binary.LittleEndian copy(buf[:idLen], ae.leader) - le.PutUint64(buf[8:], ae.term) - le.PutUint64(buf[16:], ae.commit) - le.PutUint64(buf[24:], ae.pterm) - le.PutUint64(buf[32:], ae.pindex) - le.PutUint16(buf[40:], uint16(len(ae.entries))) - wi := 42 + buf = le.AppendUint64(buf, ae.term) + buf = le.AppendUint64(buf, ae.commit) + buf = le.AppendUint64(buf, ae.pterm) + buf = le.AppendUint64(buf, ae.pindex) + buf = le.AppendUint16(buf, uint16(len(ae.entries))) for _, e := range ae.entries { - le.PutUint32(buf[wi:], uint32(len(e.Data)+1)) - wi += 4 - buf[wi] = byte(e.Type) - wi++ - copy(buf[wi:], e.Data) - wi += len(e.Data) + buf = le.AppendUint32(buf, uint32(1+len(e.Data))) + buf = append(buf, byte(e.Type)) + buf = append(buf, e.Data...) } - return buf[:wi], nil + // This is safe because old nodes will ignore bytes after the + // encoded messages. Nodes that are aware of this will decode + // it correctly. + buf = append(buf, lterm...) + return buf, nil } // This can not be used post the wire level callback since we do not copy. @@ -2353,19 +2360,24 @@ func (n *raft) decodeAppendEntry(msg []byte, sub *subscription, reply string) (* ae.reply, ae.sub = reply, sub // Decode Entries. 
- ne, ri := int(le.Uint16(msg[40:])), 42 - for i, max := 0, len(msg); i < ne; i++ { + ne, ri := int(le.Uint16(msg[40:])), uint64(42) + for i, max := 0, uint64(len(msg)); i < ne; i++ { if ri >= max-1 { return nil, errBadAppendEntry } - le := int(le.Uint32(msg[ri:])) + ml := uint64(le.Uint32(msg[ri:])) ri += 4 - if le <= 0 || ri+le > max { + if ml <= 0 || ri+ml > max { return nil, errBadAppendEntry } - entry := newEntry(EntryType(msg[ri]), msg[ri+1:ri+le]) + entry := newEntry(EntryType(msg[ri]), msg[ri+1:ri+ml]) ae.entries = append(ae.entries, entry) - ri += le + ri += ml + } + if len(msg[ri:]) > 0 { + if lterm, n := binary.Uvarint(msg[ri:]); n > 0 { + ae.lterm = lterm + } } ae.buf = msg return ae, nil @@ -2679,7 +2691,7 @@ func (n *raft) loadFirstEntry() (ae *appendEntry, err error) { func (n *raft) runCatchup(ar *appendEntryResponse, indexUpdatesQ *ipQueue[uint64]) { n.RLock() s, reply := n.s, n.areply - peer, subj, last := ar.peer, ar.reply, n.pindex + peer, subj, term, last := ar.peer, ar.reply, n.term, n.pindex n.RUnlock() defer s.grWG.Done() @@ -2719,6 +2731,14 @@ func (n *raft) runCatchup(ar *appendEntryResponse, indexUpdatesQ *ipQueue[uint64 } return true } + // Re-encode with the lterm if needed + if ae.lterm != term { + ae.lterm = term + if ae.buf, err = ae.encode(ae.buf[:0]); err != nil { + n.warn("Got an error re-encoding append entry: %v", err) + return true + } + } // Update our tracking total. om[next] = len(ae.buf) total += len(ae.buf) @@ -3015,6 +3035,12 @@ func (n *raft) trackResponse(ar *appendEntryResponse) { n.Lock() + // Check state under lock, we might not be leader anymore. + if n.State() != Leader { + n.Unlock() + return + } + // Update peer's last index. if ps := n.peers[ar.peer]; ps != nil && ar.index > ps.li { ps.li = ar.index @@ -3190,7 +3216,7 @@ func (n *raft) runAsCandidate() { // handleAppendEntry handles an append entry from the wire. This function // is an internal callback from the "asubj" append entry subscription. 
-func (n *raft) handleAppendEntry(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { +func (n *raft) handleAppendEntry(sub *subscription, c *client, _ *Account, _, reply string, msg []byte) { msg = copyBytes(msg) if ae, err := n.decodeAppendEntry(msg, sub, reply); err == nil { // Push to the new entry channel. From here one of the worker @@ -3259,7 +3285,11 @@ func (n *raft) truncateWAL(term, index uint64) { n.debug("Truncating and repairing WAL to Term %d Index %d", term, index) if term == 0 && index == 0 { - n.warn("Resetting WAL state") + if n.commit > 0 { + n.warn("Resetting WAL state") + } else { + n.debug("Clearing WAL state (no commits)") + } } defer func() { @@ -3412,12 +3442,13 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { } // Check state if we are catching up. + var resetCatchingUp bool if catchingUp { if cs := n.catchup; cs != nil && n.pterm >= cs.cterm && n.pindex >= cs.cindex { // If we are here we are good, so if we have a catchup pending we can cancel. n.cancelCatchup() // Reset our notion of catching up. - catchingUp = false + resetCatchingUp = true } else if isNew { var ar *appendEntryResponse var inbox string @@ -3437,9 +3468,17 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { } } + // Grab term from append entry. But if leader explicitly defined its term, use that instead. + // This is required during catchup if the leader catches us up on older items from previous terms. + // While still allowing us to confirm they're matching our highest known term. + lterm := ae.term + if ae.lterm != 0 { + lterm = ae.lterm + } + // If this term is greater than ours. 
- if ae.term > n.term { - n.term = ae.term + if lterm > n.term { + n.term = lterm n.vote = noVote if isNew { n.writeTermVote() @@ -3448,8 +3487,11 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.debug("Term higher than ours and we are not a follower: %v, stepping down to %q", n.State(), ae.leader) n.stepdownLocked(ae.leader) } - } else if ae.term < n.term && !catchingUp && isNew { - n.debug("Rejected AppendEntry from a leader (%s) with term %d which is less than ours", ae.leader, ae.term) + } else if lterm < n.term && sub != nil && !(catchingUp && ae.lterm == 0) { + // Anything that's below our expected highest term needs to be rejected. + // Unless we're replaying (sub=nil), in which case we'll always continue. + // For backward-compatibility we shouldn't reject if we're being caught up by an old server. + n.debug("Rejected AppendEntry from a leader (%s) with term %d which is less than ours", ae.leader, lterm) ar := newAppendEntryResponse(n.term, n.pindex, n.id, false) n.Unlock() n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf)) @@ -3457,6 +3499,11 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { return } + // Reset after checking the term is correct, because we use catchingUp in a condition above. + if resetCatchingUp { + catchingUp = false + } + if isNew && n.leader != ae.leader && n.State() == Follower { n.debug("AppendEntry updating leader to %q", ae.leader) n.updateLeader(ae.leader) @@ -3671,8 +3718,10 @@ CONTINUE: } } + // Only ever respond to new entries. + // Never respond to catchup messages, because providing quorum based on this is unsafe. var ar *appendEntryResponse - if sub != nil { + if sub != nil && isNew { ar = newAppendEntryResponse(n.pterm, n.pindex, n.id, true) } n.Unlock() @@ -3715,11 +3764,19 @@ func (n *raft) processAppendEntryResponse(ar *appendEntryResponse) { if ar.success { // The remote node successfully committed the append entry. 
+ // They agree with our leadership and are happy with the state of the log. + // In this case ar.term doesn't matter. n.trackResponse(ar) arPool.Put(ar) + } else if ar.reply != _EMPTY_ { + // The remote node didn't commit the append entry, and they believe they + // are behind and have specified a reply subject, so let's try to catch them up. + // In this case ar.term was populated with the remote's pterm. + n.catchupFollower(ar) } else if ar.term > n.term { // The remote node didn't commit the append entry, it looks like // they are on a newer term than we are. Step down. + // In this case ar.term was populated with the remote's term. n.Lock() n.term = ar.term n.vote = noVote @@ -3728,10 +3785,9 @@ func (n *raft) processAppendEntryResponse(ar *appendEntryResponse) { n.stepdownLocked(noLeader) n.Unlock() arPool.Put(ar) - } else if ar.reply != _EMPTY_ { - // The remote node didn't commit the append entry and they are - // still on the same term, so let's try to catch them up. - n.catchupFollower(ar) + } else { + // Ignore, but return back to pool. + arPool.Put(ar) } } diff --git a/server/raft_helpers_test.go b/server/raft_helpers_test.go index 8b9fc696725..7b583f4ff69 100644 --- a/server/raft_helpers_test.go +++ b/server/raft_helpers_test.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -18,6 +18,7 @@ package server import ( "encoding/binary" + "errors" "fmt" "math/rand" "sync" @@ -329,14 +330,15 @@ func (a *stateAdder) snapshot(t *testing.T) { func (rg smGroup) waitOnTotal(t *testing.T, expected int64) { t.Helper() checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { + var err error for _, sm := range rg { asm := sm.(*stateAdder) if total := asm.total(); total != expected { - return fmt.Errorf("Adder on %v has wrong total: %d vs %d", - asm.server(), total, expected) + err = errors.Join(err, fmt.Errorf("Adder on %v has wrong total: %d vs %d", + asm.server(), total, expected)) } } - return nil + return err }) } diff --git a/server/raft_test.go b/server/raft_test.go index 2f6015fd3b1..939ddd64b80 100644 --- a/server/raft_test.go +++ b/server/raft_test.go @@ -14,7 +14,6 @@ package server import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -23,6 +22,8 @@ import ( "os" "path" "path/filepath" + "reflect" + "strings" "testing" "time" @@ -133,7 +134,7 @@ func TestNRGAppendEntryDecode(t *testing.T) { // Truncate buffer first. var node *raft - short := buf[0 : len(buf)-1024] + short := buf[0 : len(buf)-1025] _, err = node.decodeAppendEntry(short, nil, _EMPTY_) require_Error(t, err, errBadAppendEntry) @@ -1093,7 +1094,7 @@ func TestNRGWALEntryWithoutQuorumMustTruncate(t *testing.T) { // The previous leader's WAL should truncate to remove the AppendEntry only it has. // Eventually all WALs for all peers must match. checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { - var expected [][]byte + var expected []*appendEntry for _, a := range rg { an := a.node().(*raft) var state StreamState @@ -1107,11 +1108,13 @@ func TestNRGWALEntryWithoutQuorumMustTruncate(t *testing.T) { if err != nil { return err } + ae.buf = nil // ... as we'll deeply check everything else in the AE. + ae.lterm = 0 // ... as lterm can differ if one node caught up another. 
seq := int(index) if len(expected) < seq { - expected = append(expected, ae.buf) - } else if !bytes.Equal(expected[seq-1], ae.buf) { - return fmt.Errorf("WAL is different: stored bytes differ") + expected = append(expected, ae) + } else if !reflect.DeepEqual(expected[seq-1], ae) { + return fmt.Errorf("WAL is different: stored AEs differ") } } } @@ -1566,6 +1569,7 @@ func TestNRGSnapshotAndTruncateToApplied(t *testing.T) { n.Applied(1) // Send heartbeat, which commits the second message. + n.switchToLeader() n.processAppendEntryResponse(&appendEntryResponse{ term: aeHeartbeat1.term, index: aeHeartbeat1.pindex, @@ -1597,6 +1601,7 @@ func TestNRGSnapshotAndTruncateToApplied(t *testing.T) { require_Equal(t, entry.leader, nats0) // Receive heartbeat from new leader, should not lose commits. + n.stepdown(noLeader) n.processAppendEntry(aeHeartbeat2, n.aesub) require_Equal(t, n.wal.State().Msgs, 0) require_Equal(t, n.commit, 2) @@ -2363,6 +2368,377 @@ func TestNRGSignalLeadChangeFalseIfCampaignImmediately(t *testing.T) { } } +func TestNRGCatchupDontCountTowardQuorum(t *testing.T) { + n, cleanup := initSingleMemRaftNode(t) + defer cleanup() + + // Create a sample entry, the content doesn't matter, just that it's stored. 
+ esm := encodeStreamMsgAllowCompress("foo", "_INBOX.foo", nil, nil, 0, 0, true) + entries := []*Entry{newEntry(EntryNormal, esm)} + + nats0 := "S1Nunr6R" // "nats-0" + + aeReply := "$TEST" + nc, err := nats.Connect(n.s.ClientURL(), nats.UserInfo("admin", "s3cr3t!")) + require_NoError(t, err) + defer nc.Close() + + sub, err := nc.SubscribeSync(aeReply) + require_NoError(t, err) + defer sub.Drain() + + // Timeline + aeMissedMsg := encode(t, &appendEntry{leader: nats0, term: 1, commit: 0, pterm: 0, pindex: 0, entries: entries, reply: aeReply}) + ae := appendEntry{leader: nats0, term: 1, commit: 0, pterm: 1, pindex: 1, entries: entries, reply: aeReply} + aeCatchupTrigger := encode(t, &ae) + aeHeartbeat := encode(t, &appendEntry{leader: nats0, term: 1, commit: 0, pterm: 1, pindex: 2, entries: nil, reply: aeReply}) + + // Simulate we missed all messages up to this point. + n.processAppendEntry(aeCatchupTrigger, n.aesub) + require_True(t, n.catchup != nil) + require_Equal(t, n.catchup.pterm, 0) // n.pterm + require_Equal(t, n.catchup.pindex, 0) // n.pindex + require_Equal(t, n.catchup.cterm, ae.pterm) + require_Equal(t, n.catchup.cindex, ae.pindex) + + // Should reply we require catchup. + msg, err := sub.NextMsg(time.Second) + require_NoError(t, err) + ar := n.decodeAppendEntryResponse(msg.Data) + require_Equal(t, ar.index, 0) + require_False(t, ar.success) + require_True(t, strings.HasPrefix(msg.Reply, "$NRG.CR")) + + // Should NEVER respond to catchup messages. + n.processAppendEntry(aeMissedMsg, n.catchup.sub) + _, err = sub.NextMsg(time.Second) + require_Error(t, err, nats.ErrTimeout) + + n.processAppendEntry(aeCatchupTrigger, n.catchup.sub) + _, err = sub.NextMsg(time.Second) + require_Error(t, err, nats.ErrTimeout) + + // Now we've received all messages, stop catchup, and respond success to new message. 
+ n.processAppendEntry(aeHeartbeat, n.aesub) + msg, err = sub.NextMsg(time.Second) + require_NoError(t, err) + ar = n.decodeAppendEntryResponse(msg.Data) + require_Equal(t, ar.index, aeHeartbeat.pindex) + require_True(t, ar.success) + require_Equal(t, msg.Reply, _EMPTY_) +} + +func TestNRGIgnoreTrackResponseWhenNotLeader(t *testing.T) { + n, cleanup := initSingleMemRaftNode(t) + defer cleanup() + + // Create a sample entry, the content doesn't matter, just that it's stored. + esm := encodeStreamMsgAllowCompress("foo", "_INBOX.foo", nil, nil, 0, 0, true) + entries := []*Entry{newEntry(EntryNormal, esm)} + + // Switch this node to leader, and send two entries. The first will get quorum, the second will not. + n.term++ + n.switchToLeader() + require_Equal(t, n.term, 1) + require_Equal(t, n.pindex, 0) + n.sendAppendEntry(entries) + require_Equal(t, n.pindex, 1) + require_Equal(t, n.pterm, 1) + require_Equal(t, n.commit, 0) + + // Step down + n.stepdown(noLeader) + require_Equal(t, n.pindex, 1) + require_Equal(t, n.pterm, 1) + require_Equal(t, n.commit, 0) + + // Normally would commit the entry, but since we're not leader anymore we should ignore it. + n.trackResponse(&appendEntryResponse{1, 1, "peer", _EMPTY_, true}) + require_Equal(t, n.commit, 0) +} + +func TestNRGRejectNewAppendEntryFromPreviousLeader(t *testing.T) { + n, cleanup := initSingleMemRaftNode(t) + defer cleanup() + + // Create a sample entry, the content doesn't matter, just that it's stored. + esm := encodeStreamMsgAllowCompress("foo", "_INBOX.foo", nil, nil, 0, 0, true) + entries := []*Entry{newEntry(EntryNormal, esm)} + + nats0 := "S1Nunr6R" // "nats-0" + + // Timeline + aeMsg1 := encode(t, &appendEntry{leader: nats0, term: 1, commit: 0, pterm: 0, pindex: 0, entries: entries}) + aeMsg2 := encode(t, &appendEntry{leader: nats0, term: 1, commit: 1, pterm: 1, pindex: 1, entries: entries}) + + // Accept first message because it equals our term. 
+ n.term = 1 + n.processAppendEntry(aeMsg1, n.aesub) + require_Equal(t, n.pterm, 1) + require_Equal(t, n.pindex, 1) + + // We are part of the successful vote for a new leader under a new term. + require_NoError(t, n.processVoteRequest(&voteRequest{term: 5, lastTerm: 1, lastIndex: 2})) + + // Must reject entry from a previous term. + n.processAppendEntry(aeMsg2, n.aesub) + require_Equal(t, n.pterm, 1) + require_Equal(t, n.pindex, 1) +} + +func TestNRGRejectAppendEntryDuringCatchupFromPreviousLeader(t *testing.T) { + test := func(t *testing.T, isCatchingUp, oldBehavior bool) { + n, cleanup := initSingleMemRaftNode(t) + defer cleanup() + + // Create a sample entry, the content doesn't matter, just that it's stored. + esm := encodeStreamMsgAllowCompress("foo", "_INBOX.foo", nil, nil, 0, 0, true) + entries := []*Entry{newEntry(EntryNormal, esm)} + + nats0 := "S1Nunr6R" // "nats-0" + + // Timeline + aeMsg1 := encode(t, &appendEntry{leader: nats0, term: 1, commit: 0, pterm: 0, pindex: 0, entries: entries}) + aeMsg2 := encode(t, &appendEntry{leader: nats0, term: 1, commit: 1, pterm: 1, pindex: 1, entries: entries}) + + // Accept first message because it equals our term. + n.term = 1 + n.processAppendEntry(aeMsg2, n.aesub) + require_True(t, n.catchup != nil) + require_Equal(t, n.catchup.pterm, 0) // n.pterm + require_Equal(t, n.catchup.pindex, 0) // n.pindex + require_Equal(t, n.catchup.cterm, aeMsg2.pterm) + require_Equal(t, n.catchup.cindex, aeMsg2.pindex) + + // Under the new behavior the term of the leader that's doing the catchup is included. + if !oldBehavior { + aeMsg1.lterm = 1 + aeMsg2.lterm = 1 + } + + // First catchup message is accepted. + catchup := n.catchup + n.processAppendEntry(aeMsg1, catchup.sub) + require_Equal(t, n.pterm, 1) + require_Equal(t, n.pindex, 1) + + // We are part of the successful vote for a new leader under a new term. + require_NoError(t, n.processVoteRequest(&voteRequest{term: 5, lastTerm: 1, lastIndex: 2})) + + // Voting cancels catchup. 
For testing, revert that so we can test + // what a catchup message after upping the term does. + nsub := n.aesub + if isCatchingUp { + n.catchup = catchup + nsub = catchup.sub + } + + // Now send the second catchup entry. + n.processAppendEntry(aeMsg2, nsub) + require_True(t, n.catchup == nil) + require_Equal(t, n.pterm, 1) + + // Under the old behavior this entry is wrongly accepted. + // A new server will also know to be backward-compatible if being caught up by an old server. + if isCatchingUp && oldBehavior { + require_Equal(t, n.pindex, 2) + } else { + // Under the new behavior the entry is correctly accepted. + require_Equal(t, n.pindex, 1) + } + } + + for _, isCatchingUp := range []bool{false, true} { + for _, oldBehavior := range []bool{false, true} { + title := "new-entry" + if isCatchingUp { + title = "catchup" + } + if oldBehavior { + title += "-backward-compatible" + } + t.Run(title, func(t *testing.T) { test(t, isCatchingUp, oldBehavior) }) + } + } +} + +func TestNRGDontRejectAppendEntryFromReplay(t *testing.T) { + test := func(t *testing.T, restart bool) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + rg := c.createRaftGroup("TEST", 3, newStateAdder) + rg.waitOnLeader() + + // Stop all servers except the leader. + l := rg.leader().(*stateAdder) + rs := rg.nonLeader() + for _, sm := range rg { + if sm != l { + sm.stop() + } + } + + // Propose a new entry to the leader and confirm it's stored in its log. + pindex, _, _ := l.node().Progress() + l.proposeDelta(10) + checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + if index, _, _ := l.node().Progress(); index == pindex { + return errors.New("proposal not stored yet") + } + return nil + }) + + // Optionally restart the leader, which will require replay of the entries in the log. + if restart { + l.stop() + l.restart() + } + + // Shutdown server should be caught up. + rs.restart() + + // Wait for both online servers to have applied the delta. 
+		checkFor(t, 5*time.Second, 200*time.Millisecond, func() (err error) {
+			for _, sm := range rg {
+				if sm != l && sm != rs {
+					continue // Skip the server that is still shut down. ("break" here would abort all remaining checks.)
+				}
+				if total := sm.(*stateAdder).total(); total != 10 {
+					err = errors.Join(err, fmt.Errorf("expected 10 total, got: %d", total))
+				}
+			}
+			return err
+		})
+	}
+
+	t.Run("no-restart", func(t *testing.T) { test(t, false) })
+	t.Run("with-restart", func(t *testing.T) { test(t, true) })
+}
+
+func TestNRGSimpleCatchup(t *testing.T) {
+	test := func(t *testing.T, leaderChange bool) {
+		c := createJetStreamClusterExplicit(t, "R3S", 3)
+		defer c.shutdown()
+
+		rg := c.createRaftGroup("TEST", 3, newStateAdder)
+		rg.waitOnLeader()
+
+		// Shutdown a single group, we'll start it later for catchup.
+		nl := rg.nonLeader()
+		nl.stop()
+
+		l := rg.leader().(*stateAdder)
+		l.proposeDelta(10)
+
+		// Wait for remaining servers to have applied the delta.
+		checkFor(t, 2*time.Second, 200*time.Millisecond, func() (err error) {
+			for _, sm := range rg {
+				total := sm.(*stateAdder).total()
+				if sm == nl && total != 0 {
+					err = errors.Join(err, errors.New("expected shutdown server to not get data"))
+				} else if sm != nl && total != 10 {
+					err = errors.Join(err, fmt.Errorf("expected 10 total, got: %d", total))
+				}
+			}
+			return err
+		})
+
+		if leaderChange {
+			require_NoError(t, l.node().StepDown())
+		}
+
+		// Shutdown server should be caught up.
+ nl.restart() + rg.waitOnTotal(t, 10) + } + + t.Run("same-leader", func(t *testing.T) { test(t, false) }) + t.Run("change-leader", func(t *testing.T) { test(t, true) }) +} + +func TestNRGSnapshotCatchup(t *testing.T) { + test := func(t *testing.T, restart bool) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + rg := c.createRaftGroup("TEST", 3, newStateAdder) + rg.waitOnLeader() + + l := rg.leader().(*stateAdder) + var s1 stateMachine + var s2 stateMachine + for _, sm := range rg { + if sm == l { + continue + } + if s1 == nil { + s1 = sm + } else { + s2 = sm + } + } + + // Stop one non-leader server. + s1.stop() + + // Wait for both online servers to have applied the delta. + l.proposeDelta(10) + checkFor(t, 5*time.Second, 200*time.Millisecond, func() (err error) { + for _, sm := range rg { + if sm == s1 { + continue + } + if total := sm.(*stateAdder).total(); total != 10 { + err = errors.Join(err, fmt.Errorf("expected 10 total, got: %d", total)) + } + } + return err + }) + + // Shutdown last non-leader server. + s2.stop() + + // Snapshot so outdated server needs to catchup based on it. + l.snapshot(t) + + // Propose a new entry to the leader and confirm it's stored in its log. + pindex, _, _ := l.node().Progress() + l.proposeDelta(10) + checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { + if index, _, _ := l.node().Progress(); index == pindex { + return errors.New("proposal not stored yet") + } + return nil + }) + + // Optionally restart the leader, which will require replay of the entries in the log. + if restart { + l.stop() + l.restart() + } + + // Shutdown server should be caught up. + s1.restart() + + // Wait for both online servers to have applied the delta. 
+		checkFor(t, 5*time.Second, 200*time.Millisecond, func() (err error) {
+			for _, sm := range rg {
+				if sm == s2 {
+					continue // s2 is still shut down, skip it. ("break" here would abort all remaining checks.)
+				}
+				if total := sm.(*stateAdder).total(); total != 20 {
+					err = errors.Join(err, fmt.Errorf("expected 20 total, got: %d", total))
+				}
+			}
+			return err
+		})
+	}
+
+	t.Run("no-restart", func(t *testing.T) { test(t, false) })
+	t.Run("with-restart", func(t *testing.T) { test(t, true) })
+}
+
 // This is a RaftChainOfBlocks test where a block is proposed and then we wait for all replicas to apply it before
 // proposing the next one.
 // The test may fail if:
diff --git a/server/rate_counter.go b/server/rate_counter.go
index 247793744da..0988b83941f 100644
--- a/server/rate_counter.go
+++ b/server/rate_counter.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2021 The NATS Authors
+// Copyright 2021-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/server/rate_counter_test.go b/server/rate_counter_test.go
index cdd9c331622..8b609af33d4 100644
--- a/server/rate_counter_test.go
+++ b/server/rate_counter_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2021 The NATS Authors
+// Copyright 2021-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/server/reload.go b/server/reload.go
index 70f09a904bd..784b905778b 100644
--- a/server/reload.go
+++ b/server/reload.go
@@ -1,4 +1,4 @@
-// Copyright 2017-2024 The NATS Authors
+// Copyright 2017-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at diff --git a/server/reload_test.go b/server/reload_test.go index ba068dbf9ff..99bf4991dc8 100644 --- a/server/reload_test.go +++ b/server/reload_test.go @@ -1,4 +1,4 @@ -// Copyright 2017-2024 The NATS Authors +// Copyright 2017-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/ring.go b/server/ring.go index 1db3961382a..2cfa05faa68 100644 --- a/server/ring.go +++ b/server/ring.go @@ -1,4 +1,4 @@ -// Copyright 2018-2020 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/routes_test.go b/server/routes_test.go index af0ec3bbe74..941312a85ea 100644 --- a/server/routes_test.go +++ b/server/routes_test.go @@ -1,4 +1,4 @@ -// Copyright 2013-2024 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/sendq.go b/server/sendq.go index 5018482db5e..9cde157e761 100644 --- a/server/sendq.go +++ b/server/sendq.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/server_test.go b/server/server_test.go index 064db34856b..0f8a8069ae6 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/service.go b/server/service.go index 7822206a625..fe394191b4f 100644 --- a/server/service.go +++ b/server/service.go @@ -1,4 +1,4 @@ -// Copyright 2012-2021 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/service_test.go b/server/service_test.go index 9a850d65ec9..6a204a02ded 100644 --- a/server/service_test.go +++ b/server/service_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2021 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/service_windows.go b/server/service_windows.go index eed399f68c9..62a6c00e87b 100644 --- a/server/service_windows.go +++ b/server/service_windows.go @@ -1,4 +1,4 @@ -// Copyright 2012-2022 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/service_windows_test.go b/server/service_windows_test.go index f4564cc2502..48ec4d409dc 100644 --- a/server/service_windows_test.go +++ b/server/service_windows_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/signal_wasm.go b/server/signal_wasm.go index 7788d3ffeb6..7ee34e4abae 100644 --- a/server/signal_wasm.go +++ b/server/signal_wasm.go @@ -1,4 +1,4 @@ -// Copyright 2022 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/signal_windows.go b/server/signal_windows.go index 2f5a27c51d7..1ecabed4af7 100644 --- a/server/signal_windows.go +++ b/server/signal_windows.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/split_test.go b/server/split_test.go index e3d8dfc92df..7fd17aa13b6 100644 --- a/server/split_test.go +++ b/server/split_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/store_test.go b/server/store_test.go index 05f09993811..a4f8ec8a2d7 100644 --- a/server/store_test.go +++ b/server/store_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -18,6 +18,7 @@ package server import ( "fmt" "testing" + "time" ) func testAllStoreAllPermutations(t *testing.T, compressionAndEncryption bool, cfg StreamConfig, fn func(t *testing.T, fs StreamStore)) { @@ -535,3 +536,50 @@ func TestStorePurgeExZero(t *testing.T) { }, ) } + +func TestStoreUpdateConfigTTLState(t *testing.T) { + config := func() StreamConfig { + return StreamConfig{Name: "TEST", Subjects: []string{"foo"}} + } + testAllStoreAllPermutations( + t, false, config(), + func(t *testing.T, fs StreamStore) { + cfg := config() + switch fs.(type) { + case *fileStore: + cfg.Storage = FileStorage + case *memStore: + cfg.Storage = MemoryStorage + } + + // TTLs disabled at this point so this message should survive. + seq, _, err := fs.StoreMsg("foo", nil, nil, 1) + require_NoError(t, err) + time.Sleep(2 * time.Second) + _, err = fs.LoadMsg(seq, nil) + require_NoError(t, err) + + // Now enable TTLs. + cfg.AllowMsgTTL = true + require_NoError(t, fs.UpdateConfig(&cfg)) + + // TTLs enabled at this point so this message should be cleaned up. + seq, _, err = fs.StoreMsg("foo", nil, nil, 1) + require_NoError(t, err) + time.Sleep(2 * time.Second) + _, err = fs.LoadMsg(seq, nil) + require_Error(t, err) + + // Now disable TTLs again. + cfg.AllowMsgTTL = false + require_NoError(t, fs.UpdateConfig(&cfg)) + + // TTLs disabled again so this message should survive. 
+ seq, _, err = fs.StoreMsg("foo", nil, nil, 1) + require_NoError(t, err) + time.Sleep(2 * time.Second) + _, err = fs.LoadMsg(seq, nil) + require_NoError(t, err) + }, + ) +} diff --git a/server/stream.go b/server/stream.go index 8e8029e4ace..d7b840fe718 100644 --- a/server/stream.go +++ b/server/stream.go @@ -25,6 +25,7 @@ import ( "os" "path/filepath" "reflect" + "slices" "strconv" "strings" "sync" @@ -256,12 +257,20 @@ type StreamSource struct { iname string // For indexing when stream names are the same for multiple sources. } -// ExternalStream allows you to qualify access to a stream source in another account. +// ExternalStream allows you to qualify access to a stream source in another account or domain. type ExternalStream struct { ApiPrefix string `json:"api"` DeliverPrefix string `json:"deliver"` } +// Will return the domain for this external stream. +func (ext *ExternalStream) Domain() string { + if ext == nil || ext.ApiPrefix == _EMPTY_ { + return _EMPTY_ + } + return tokenAt(ext.ApiPrefix, 2) +} + // For managing stream ingest. const ( streamDefaultMaxQueueMsgs = 10_000 @@ -534,8 +543,12 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt s.setIndexName() } + // Hold lock, because we'll be reading from and writing to a shared object. + js.mu.Lock() copyStreamMetadata(cfg, &ocfg) - if reflect.DeepEqual(cfg, &ocfg) { + deepEqual := reflect.DeepEqual(cfg, &ocfg) + js.mu.Unlock() + if deepEqual { if sa != nil { mset.setStreamAssignment(sa) } @@ -2267,8 +2280,12 @@ func (mset *stream) purge(preq *JSApiStreamPurgeRequest) (purged uint64, err err fseq = ss.First } + // Take a copy of cList to avoid o.purge() potentially taking the stream lock and + // violating the lock ordering. 
mset.clsMu.RLock() - for _, o := range mset.cList { + cList := slices.Clone(mset.cList) + mset.clsMu.RUnlock() + for _, o := range cList { start := fseq o.mu.RLock() // we update consumer sequences if: @@ -2290,7 +2307,6 @@ func (mset *stream) purge(preq *JSApiStreamPurgeRequest) (purged uint64, err err o.purge(start, lseq, isWider) } } - mset.clsMu.RUnlock() return purged, nil } @@ -2333,14 +2349,15 @@ func (mset *stream) eraseMsg(seq uint64) (bool, error) { // Are we a mirror? func (mset *stream) isMirror() bool { - mset.mu.RLock() - defer mset.mu.RUnlock() + mset.cfgMu.RLock() + defer mset.cfgMu.RUnlock() return mset.cfg.Mirror != nil } func (mset *stream) sourcesInfo() (sis []*StreamSourceInfo) { mset.mu.RLock() defer mset.mu.RUnlock() + sis = make([]*StreamSourceInfo, 0, len(mset.sources)) for _, si := range mset.sources { sis = append(sis, mset.sourceInfo(si)) } @@ -2357,7 +2374,7 @@ func (mset *stream) sourceInfo(si *sourceInfo) *StreamSourceInfo { trConfigs := make([]SubjectTransformConfig, len(si.sfs)) for i := range si.sfs { - destination := _EMPTY_ + var destination string if si.trs[i] != nil { destination = si.trs[i].dest } @@ -2395,6 +2412,40 @@ func (mset *stream) mirrorInfo() *StreamSourceInfo { return mset.sourceInfo(mset.mirror) } +// retryDisconnectedSyncConsumers() will check if we have any disconnected +// sync consumers for either mirror or a source and will reset and retry to connect. +func (mset *stream) retryDisconnectedSyncConsumers(remoteDomain string) { + mset.mu.Lock() + defer mset.mu.Unlock() + + // Only applicable if we are the stream leader. + if !mset.isLeader() { + return + } + + // Check mirrors first. 
+ if si := mset.mirror; si != nil { + if si.sub == nil && !si.sip { + if remoteDomain == _EMPTY_ || (mset.cfg.Mirror != nil && mset.cfg.Mirror.External.Domain() == remoteDomain) { + // Need to reset + si.fails = 0 + mset.cancelSourceInfo(si) + mset.scheduleSetupMirrorConsumerRetry() + } + } + } else { + for _, si := range mset.sources { + ss := mset.streamSource(si.iname) + if remoteDomain == _EMPTY_ || (ss != nil && ss.External.Domain() == remoteDomain) { + // Need to reset + si.fails = 0 + mset.cancelSourceInfo(si) + mset.setupSourceConsumer(si.iname, si.sseq+1, time.Time{}) + } + } + } +} + const ( // Our consumer HB interval. sourceHealthHB = 1 * time.Second @@ -2956,6 +3007,10 @@ func (mset *stream) setupMirrorConsumer() error { msgs := mirror.msgs sub, err := mset.subscribeInternal(deliverSubject, func(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { hdr, msg := c.msgParts(copyBytes(rmsg)) // Need to copy. + if len(hdr) > 0 { + // Remove any Nats-Expected- headers as we don't want to validate them. 
+ hdr = removeHeaderIfPrefixPresent(hdr, "Nats-Expected-") + } mset.queueInbound(msgs, subject, reply, hdr, msg, nil, nil) mirror.last.Store(time.Now().UnixNano()) }) @@ -4398,10 +4453,11 @@ func (mset *stream) queueInbound(ib *ipQueue[*inMsg], subj, rply string, hdr, ms im.subj, im.rply, im.hdr, im.msg, im.si, im.mt = subj, rply, hdr, msg, si, mt if _, err := ib.push(im); err != nil { im.returnToPool() - mset.srv.RateLimitWarnf("Dropping messages due to excessive stream ingest rate on '%s' > '%s': %s", mset.acc.Name, mset.name(), err) + streamName := mset.cfg.Name + mset.srv.RateLimitWarnf("Dropping messages due to excessive stream ingest rate on '%s' > '%s': %s", mset.acc.Name, streamName, err) if rply != _EMPTY_ { hdr := []byte("NATS/1.0 429 Too Many Requests\r\n\r\n") - b, _ := json.Marshal(&JSPubAckResponse{PubAck: &PubAck{Stream: mset.cfg.Name}, Error: NewJSStreamTooManyRequestsError()}) + b, _ := json.Marshal(&JSPubAckResponse{PubAck: &PubAck{Stream: streamName}, Error: NewJSStreamTooManyRequestsError()}) mset.outq.send(newJSPubMsg(rply, _EMPTY_, _EMPTY_, hdr, b, nil, 0)) } } @@ -4863,30 +4919,34 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, } // For clustering the lower layers will pass our expected lseq. If it is present check for that here. - if lseq > 0 && lseq != (mset.lseq+mset.clfs) { - isMisMatch := true - // We may be able to recover here if we have no state whatsoever, or we are a mirror. - // See if we have to adjust our starting sequence. - if mset.lseq == 0 || mset.cfg.Mirror != nil { - var state StreamState - mset.store.FastState(&state) - if state.FirstSeq == 0 { - mset.store.Compact(lseq + 1) - mset.lseq = lseq - isMisMatch = false + var clfs uint64 + if lseq > 0 { + clfs = mset.getCLFS() + if lseq != (mset.lseq + clfs) { + isMisMatch := true + // We may be able to recover here if we have no state whatsoever, or we are a mirror. + // See if we have to adjust our starting sequence. 
+ if mset.lseq == 0 || mset.cfg.Mirror != nil { + var state StreamState + mset.store.FastState(&state) + if state.FirstSeq == 0 { + mset.store.Compact(lseq + 1) + mset.lseq = lseq + isMisMatch = false + } } - } - // Really is a mismatch. - if isMisMatch { - outq := mset.outq - mset.mu.Unlock() - if canRespond && outq != nil { - resp.PubAck = &PubAck{Stream: name} - resp.Error = ApiErrors[JSStreamSequenceNotMatchErr] - b, _ := json.Marshal(resp) - outq.sendMsg(reply, b) + // Really is a mismatch. + if isMisMatch { + outq := mset.outq + mset.mu.Unlock() + if canRespond && outq != nil { + resp.PubAck = &PubAck{Stream: name} + resp.Error = ApiErrors[JSStreamSequenceNotMatchErr] + b, _ := json.Marshal(resp) + outq.sendMsg(reply, b) + } + return errLastSeqMismatch } - return errLastSeqMismatch } } @@ -5150,7 +5210,6 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, // Assume this will succeed. olmsgId := mset.lmsgId mset.lmsgId = msgId - clfs := mset.clfs mset.lseq++ tierName := mset.tier @@ -5783,7 +5842,8 @@ func (mset *stream) stop(deleteFlag, advisory bool) error { if deleteFlag { n.Delete() sa = mset.sa - } else { + } else if !isShuttingDown { + // Stop Raft, unless JetStream is already shutting down, in which case they'll be stopped separately. n.Stop() } } diff --git a/server/stree/helper_test.go b/server/stree/helper_test.go index 8184fc25b36..be1dce2f15f 100644 --- a/server/stree/helper_test.go +++ b/server/stree/helper_test.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/stree/parts.go b/server/stree/parts.go index 1254dd8549b..9ac059677e7 100644 --- a/server/stree/parts.go +++ b/server/stree/parts.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/stree/stree.go b/server/stree/stree.go index add5d0a27fb..28dc72f08b8 100644 --- a/server/stree/stree.go +++ b/server/stree/stree.go @@ -428,7 +428,7 @@ func (t *SubjectTree[T]) iter(n node, pre []byte, ordered bool, cb func(subject // aggressively optimize against repeated walks, but is considerably faster // in most cases than intersecting against a potentially large sublist. func LazyIntersect[TL, TR any](tl *SubjectTree[TL], tr *SubjectTree[TR], cb func([]byte, *TL, *TR)) { - if tl.root == nil || tr.root == nil { + if tl == nil || tr == nil || tl.root == nil || tr.root == nil { return } // Iterate over the smaller tree to reduce the number of rounds. 
diff --git a/server/stree/stree_test.go b/server/stree/stree_test.go index 1851d8ba7b9..396c5d9b999 100644 --- a/server/stree/stree_test.go +++ b/server/stree/stree_test.go @@ -978,3 +978,811 @@ func TestSubjectTreeDeleteShortSubjectNoPanic(t *testing.T) { require_True(t, found) require_Equal(t, *v, 2) } + +func TestSubjectTreeEmpty(t *testing.T) { + // Test Empty on nil tree + var st *SubjectTree[int] + st2 := st.Empty() + require_True(t, st2 != nil) + require_Equal(t, st2.Size(), 0) + + // Test Empty on new tree + st = NewSubjectTree[int]() + require_Equal(t, st.Size(), 0) + st2 = st.Empty() + require_True(t, st2 == st) // Should return same instance + require_Equal(t, st2.Size(), 0) + + // Test Empty on tree with data + st.Insert(b("foo.bar"), 1) + st.Insert(b("foo.baz"), 2) + st.Insert(b("bar.baz"), 3) + require_Equal(t, st.Size(), 3) + + // Empty should clear everything + st2 = st.Empty() + require_True(t, st2 == st) // Should return same instance + require_Equal(t, st.Size(), 0) + require_True(t, st.root == nil) + + // Verify we can't find old entries + _, found := st.Find(b("foo.bar")) + require_False(t, found) + _, found = st.Find(b("foo.baz")) + require_False(t, found) + _, found = st.Find(b("bar.baz")) + require_False(t, found) + + // Verify we can insert new entries after Empty + old, updated := st.Insert(b("new.entry"), 42) + require_True(t, old == nil) + require_False(t, updated) + require_Equal(t, st.Size(), 1) + + v, found := st.Find(b("new.entry")) + require_True(t, found) + require_Equal(t, *v, 42) +} + +func TestSubjectTreeLazyIntersectComprehensive(t *testing.T) { + // Test with nil trees + var st1 *SubjectTree[int] + var st2 *SubjectTree[string] + count := 0 + LazyIntersect(st1, st2, func(key []byte, v1 *int, v2 *string) { + count++ + }) + require_Equal(t, count, 0) + + // Test with one nil tree + st1 = NewSubjectTree[int]() + st1.Insert(b("foo"), 1) + LazyIntersect(st1, st2, func(key []byte, v1 *int, v2 *string) { + count++ + }) + 
require_Equal(t, count, 0) + + // Test with empty trees + st2 = NewSubjectTree[string]() + LazyIntersect(st1, st2, func(key []byte, v1 *int, v2 *string) { + count++ + }) + require_Equal(t, count, 0) + + // Test with different value types + st1 = NewSubjectTree[int]() + st2 = NewSubjectTree[string]() + + // Add some intersecting keys + st1.Insert(b("foo.bar"), 42) + st2.Insert(b("foo.bar"), "hello") + st1.Insert(b("baz.qux"), 100) + st2.Insert(b("baz.qux"), "world") + + // Add non-intersecting keys + st1.Insert(b("only.in.st1"), 1) + st2.Insert(b("only.in.st2"), "two") + + results := make(map[string]struct { + v1 int + v2 string + }) + + LazyIntersect(st1, st2, func(key []byte, v1 *int, v2 *string) { + results[string(key)] = struct { + v1 int + v2 string + }{*v1, *v2} + }) + + require_Equal(t, len(results), 2) + require_Equal(t, results["foo.bar"].v1, 42) + require_Equal(t, results["foo.bar"].v2, "hello") + require_Equal(t, results["baz.qux"].v1, 100) + require_Equal(t, results["baz.qux"].v2, "world") + + // Test that it iterates over smaller tree + // Create a large tree and a small tree + large := NewSubjectTree[int]() + small := NewSubjectTree[int]() + + // Large tree has many entries + for i := 0; i < 100; i++ { + large.Insert([]byte(fmt.Sprintf("large.%d", i)), i) + } + // Small tree has few entries with some overlap + small.Insert(b("large.5"), 500) + small.Insert(b("large.10"), 1000) + small.Insert(b("large.50"), 5000) + small.Insert(b("small.only"), 999) + + intersectCount := 0 + LazyIntersect(large, small, func(key []byte, v1 *int, v2 *int) { + intersectCount++ + // Verify we get the correct values + switch string(key) { + case "large.5": + require_Equal(t, *v1, 5) + require_Equal(t, *v2, 500) + case "large.10": + require_Equal(t, *v1, 10) + require_Equal(t, *v2, 1000) + case "large.50": + require_Equal(t, *v1, 50) + require_Equal(t, *v2, 5000) + default: + t.Fatalf("Unexpected key: %s", key) + } + }) + require_Equal(t, intersectCount, 3) + + // Test with 
complex subjects (multiple levels) + st3 := NewSubjectTree[int]() + st4 := NewSubjectTree[int]() + + // Deep nesting + st3.Insert(b("a.b.c.d.e.f.g"), 1) + st4.Insert(b("a.b.c.d.e.f.g"), 2) + + // Partial matches (should not intersect) + st3.Insert(b("a.b.c.d"), 3) + st4.Insert(b("a.b.c.d.e"), 4) + + // Same prefix different suffix + st3.Insert(b("prefix.suffix1"), 5) + st4.Insert(b("prefix.suffix2"), 6) + + intersections := 0 + LazyIntersect(st3, st4, func(key []byte, v1 *int, v2 *int) { + intersections++ + require_Equal(t, string(key), "a.b.c.d.e.f.g") + require_Equal(t, *v1, 1) + require_Equal(t, *v2, 2) + }) + require_Equal(t, intersections, 1) +} + +func TestNode256Operations(t *testing.T) { + // Test node256 creation and basic operations + n := newNode256(b("prefix")) + require_False(t, n.isFull()) // node256 is never full + + // Test findChild when child doesn't exist + child := n.findChild('a') + require_True(t, child == nil) + + // Add a child and find it + leaf := newLeaf(b("suffix"), 42) + n.addChild('a', leaf) + child = n.findChild('a') + require_True(t, child != nil) + require_Equal(t, n.size, uint16(1)) + + // Test iter function + iterCount := 0 + n.iter(func(node) bool { + iterCount++ + return true + }) + require_Equal(t, iterCount, 1) + + // Test iter with early termination + n.addChild('b', newLeaf(b("suffix2"), 43)) + n.addChild('c', newLeaf(b("suffix3"), 44)) + iterCount = 0 + n.iter(func(node) bool { + iterCount++ + return false // Stop after first + }) + require_Equal(t, iterCount, 1) + + // Test children() method + children := n.children() + require_Equal(t, len(children), 256) + + // Test that grow() panics + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "grow can not be called on node256") + } else { + t.Fatal("grow() should panic on node256") + } + }() + n.grow() +} + +func TestNode256Shrink(t *testing.T) { + // To get a node256, we need to go through the progression: + // node4 -> node10 -> node16 -> node48 -> node256 
+ // We need at least 49 children to get to node256 + + // Create nodes directly to test node256 shrinking + n256 := newNode256(b("prefix")) + + // Add 49 children + for i := 0; i < 49; i++ { + n256.addChild(byte(i), newLeaf([]byte{byte(i)}, i)) + } + require_Equal(t, n256.size, uint16(49)) + + // Shrink should not happen yet (> 48 children) + shrunk := n256.shrink() + require_True(t, shrunk == nil) + + // Delete one to get to 48 children + n256.deleteChild(0) + require_Equal(t, n256.size, uint16(48)) + + // Now shrink should return a node48 + shrunk = n256.shrink() + require_True(t, shrunk != nil) + _, isNode48 := shrunk.(*node48) + require_True(t, isNode48) + + // Verify the shrunk node has all remaining children + for i := 1; i < 49; i++ { + child := shrunk.findChild(byte(i)) + require_True(t, child != nil) + } +} + +func TestLeafPanicMethods(t *testing.T) { + leaf := newLeaf(b("test"), 42) + + // Test setPrefix panic + t.Run("setPrefix", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "setPrefix called on leaf") + } else { + t.Fatal("setPrefix should panic on leaf") + } + }() + leaf.setPrefix(b("prefix")) + }) + + // Test addChild panic + t.Run("addChild", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "addChild called on leaf") + } else { + t.Fatal("addChild should panic on leaf") + } + }() + leaf.addChild('a', nil) + }) + + // Test findChild panic + t.Run("findChild", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "findChild called on leaf") + } else { + t.Fatal("findChild should panic on leaf") + } + }() + leaf.findChild('a') + }) + + // Test grow panic + t.Run("grow", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "grow called on leaf") + } else { + t.Fatal("grow should panic on leaf") + } + }() + leaf.grow() + }) + + // Test deleteChild panic + t.Run("deleteChild", func(t 
*testing.T) { + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "deleteChild called on leaf") + } else { + t.Fatal("deleteChild should panic on leaf") + } + }() + leaf.deleteChild('a') + }) + + // Test shrink panic + t.Run("shrink", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "shrink called on leaf") + } else { + t.Fatal("shrink should panic on leaf") + } + }() + leaf.shrink() + }) + + // Test other leaf methods that should work + require_True(t, leaf.isFull()) + require_True(t, leaf.base() == nil) + require_Equal(t, leaf.numChildren(), uint16(0)) + require_True(t, leaf.children() == nil) + + // Test iter (should do nothing) + called := false + leaf.iter(func(n node) bool { + called = true + return true + }) + require_False(t, called) +} + +func TestSizeOnNilTree(t *testing.T) { + var st *SubjectTree[int] + require_Equal(t, st.Size(), 0) +} + +func TestFindEdgeCases(t *testing.T) { + st := NewSubjectTree[int]() + + // Test Find with empty subject at root level + st.Insert(b("foo.bar.baz"), 1) + st.Insert(b("foo"), 2) + + // This should create a tree structure, now test finding with edge cases + v, found := st.Find(b("")) + require_False(t, found) + require_True(t, v == nil) +} + +func TestNodeIterMethods(t *testing.T) { + // Test node4 iter + n4 := newNode4(b("prefix")) + n4.addChild('a', newLeaf(b("1"), 1)) + n4.addChild('b', newLeaf(b("2"), 2)) + + count := 0 + n4.iter(func(n node) bool { + count++ + return true + }) + require_Equal(t, count, 2) + + // Test early termination + count = 0 + n4.iter(func(n node) bool { + count++ + return false + }) + require_Equal(t, count, 1) + + // Test node10 iter + n10 := newNode10(b("prefix")) + for i := 0; i < 5; i++ { + n10.addChild(byte('a'+i), newLeaf([]byte{byte('0' + i)}, i)) + } + + count = 0 + n10.iter(func(n node) bool { + count++ + return true + }) + require_Equal(t, count, 5) + + // Test node16 iter + n16 := newNode16(b("prefix")) + for i := 0; i < 
8; i++ { + n16.addChild(byte('a'+i), newLeaf([]byte{byte('0' + i)}, i)) + } + + count = 0 + n16.iter(func(n node) bool { + count++ + return true + }) + require_Equal(t, count, 8) +} + +func TestIterOrderedAndIterFastNilRoot(t *testing.T) { + // Test IterOrdered with nil root + st := NewSubjectTree[int]() + count := 0 + st.IterOrdered(func(subject []byte, val *int) bool { + count++ + return true + }) + require_Equal(t, count, 0) + + // Test IterFast with nil root + count = 0 + st.IterFast(func(subject []byte, val *int) bool { + count++ + return true + }) + require_Equal(t, count, 0) +} + +func TestNodeAddChildPanic(t *testing.T) { + // Test node4 addChild panic when full + n4 := newNode4(b("prefix")) + n4.addChild('a', newLeaf(b("1"), 1)) + n4.addChild('b', newLeaf(b("2"), 2)) + n4.addChild('c', newLeaf(b("3"), 3)) + n4.addChild('d', newLeaf(b("4"), 4)) + + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "node4 full!") + } else { + t.Fatal("addChild should panic when node4 is full") + } + }() + n4.addChild('e', newLeaf(b("5"), 5)) +} + +func TestNodeAddChildPanicOthers(t *testing.T) { + // Test node10 addChild panic when full + t.Run("node10", func(t *testing.T) { + n10 := newNode10(b("prefix")) + for i := 0; i < 10; i++ { + n10.addChild(byte('a'+i), newLeaf([]byte{byte('0' + i)}, i)) + } + + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "node10 full!") + } else { + t.Fatal("addChild should panic when node10 is full") + } + }() + n10.addChild('k', newLeaf(b("11"), 11)) + }) + + // Test node16 addChild panic when full + t.Run("node16", func(t *testing.T) { + n16 := newNode16(b("prefix")) + for i := 0; i < 16; i++ { + n16.addChild(byte(i), newLeaf([]byte{byte(i)}, i)) + } + + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "node16 full!") + } else { + t.Fatal("addChild should panic when node16 is full") + } + }() + n16.addChild(16, newLeaf(b("16"), 16)) + }) + + // Test node48 addChild panic when full + 
t.Run("node48", func(t *testing.T) { + n48 := newNode48(b("prefix")) + for i := 0; i < 48; i++ { + n48.addChild(byte(i), newLeaf([]byte{byte(i)}, i)) + } + + defer func() { + if r := recover(); r != nil { + require_Equal(t, r, "node48 full!") + } else { + t.Fatal("addChild should panic when node48 is full") + } + }() + n48.addChild(48, newLeaf(b("48"), 48)) + }) +} + +func TestNodeDeleteChildNotFound(t *testing.T) { + // Test node10 deleteChild when child doesn't exist + n10 := newNode10(b("prefix")) + n10.addChild('a', newLeaf(b("1"), 1)) + n10.addChild('b', newLeaf(b("2"), 2)) + + // Try to delete non-existent child + n10.deleteChild('z') + require_Equal(t, n10.size, uint16(2)) // Size should remain unchanged + + // Test node16 deleteChild when child doesn't exist + n16 := newNode16(b("prefix")) + n16.addChild('a', newLeaf(b("1"), 1)) + n16.addChild('b', newLeaf(b("2"), 2)) + + n16.deleteChild('z') + require_Equal(t, n16.size, uint16(2)) + + // Test node48 deleteChild when child doesn't exist + n48 := newNode48(b("prefix")) + n48.addChild(0, newLeaf(b("1"), 1)) + n48.addChild(1, newLeaf(b("2"), 2)) + + n48.deleteChild(255) + require_Equal(t, n48.size, uint16(2)) +} + +func TestNodeShrinkNotNeeded(t *testing.T) { + // Test node10 shrink when not needed (has more than 4 children) + n10 := newNode10(b("prefix")) + for i := 0; i < 5; i++ { + n10.addChild(byte('a'+i), newLeaf([]byte{byte('0' + i)}, i)) + } + + shrunk := n10.shrink() + require_True(t, shrunk == nil) // Should not shrink + + // Test node16 shrink when not needed (has more than 10 children) + n16 := newNode16(b("prefix")) + for i := 0; i < 11; i++ { + n16.addChild(byte(i), newLeaf([]byte{byte(i)}, i)) + } + + shrunk = n16.shrink() + require_True(t, shrunk == nil) // Should not shrink +} + +func TestNode48IterEarlyTermination(t *testing.T) { + n48 := newNode48(b("prefix")) + for i := 0; i < 10; i++ { + n48.addChild(byte(i), newLeaf([]byte{byte(i)}, i)) + } + + count := 0 + n48.iter(func(n node) bool { + 
count++ + return false // Stop immediately + }) + require_Equal(t, count, 1) +} + +func TestNode10And16IterEarlyTermination(t *testing.T) { + // Test node10 early termination + n10 := newNode10(b("prefix")) + for i := 0; i < 5; i++ { + n10.addChild(byte('a'+i), newLeaf([]byte{byte('0' + i)}, i)) + } + + count := 0 + n10.iter(func(n node) bool { + count++ + return count < 2 // Stop after 2 + }) + require_Equal(t, count, 2) + + // Test node16 early termination + n16 := newNode16(b("prefix")) + for i := 0; i < 8; i++ { + n16.addChild(byte(i), newLeaf([]byte{byte(i)}, i)) + } + + count = 0 + n16.iter(func(n node) bool { + count++ + return count < 3 // Stop after 3 + }) + require_Equal(t, count, 3) +} + +func TestMatchPartsEdgeCases(t *testing.T) { + // Test the edge case in matchParts that's not covered + // This is the case where we have a part that needs to be copied and modified + + // Create a complex filter that will trigger the edge case + filter := b("foo.*.bar.>") + parts := genParts(filter, nil) + + // Test with a fragment that will cause partial matching + frag := b("foo.test") + remaining, matched := matchParts(parts, frag) + require_True(t, matched) + require_True(t, len(remaining) > 0) +} + +func TestInsertEdgeCases(t *testing.T) { + st := NewSubjectTree[int]() + + // Test inserting with noPivot byte (should fail) + old, updated := st.Insert([]byte("foo\x7fbar"), 1) + require_True(t, old == nil) + require_False(t, updated) + require_Equal(t, st.Size(), 0) // Should not insert + + // Test the edge case where we need to split with same pivot + st = NewSubjectTree[int]() + // This case tests subjects that cause the same pivot after split + // Both subjects share prefix "a" and have same pivot "." 
after split + st.Insert(b("a.b"), 1) + // Now insert one that will cause the split with same pivot + st.Insert(b("a.c"), 2) + + require_Equal(t, st.Size(), 2) +} + +func TestDeleteEdgeCases(t *testing.T) { + st := NewSubjectTree[int]() + + // Test delete on empty tree + val, deleted := st.Delete(b("foo")) + require_False(t, deleted) + require_True(t, val == nil) + + // Test delete with empty subject + st.Insert(b("foo"), 1) + val, deleted = st.Delete(b("")) + require_False(t, deleted) + require_True(t, val == nil) + + // Test delete with subject shorter than prefix + st = NewSubjectTree[int]() + st.Insert(b("verylongprefix.suffix"), 1) + st.Insert(b("verylongprefix.suffix2"), 2) + val, deleted = st.Delete(b("very")) + require_False(t, deleted) + require_True(t, val == nil) +} + +func TestMatchEdgeCases(t *testing.T) { + st := NewSubjectTree[int]() + + // Test match with nil callback + st.Insert(b("foo.bar"), 1) + st.Match(b("foo.*"), nil) // Should not panic + + // Test match with empty filter + count := 0 + st.Match(b(""), func(subject []byte, val *int) { + count++ + }) + require_Equal(t, count, 0) +} + +func TestIterEdgeCases(t *testing.T) { + st := NewSubjectTree[int]() + + // Add multiple subjects to create a complex tree + st.Insert(b("a.b.c"), 1) + st.Insert(b("a.b.d"), 2) + st.Insert(b("a.c.d"), 3) + st.Insert(b("b.c.d"), 4) + + // Test iter with early termination at different points + count := 0 + st.iter(st.root, nil, false, func(subject []byte, val *int) bool { + count++ + return count < 2 + }) + require_Equal(t, count, 2) +} + +func TestLeafIter(t *testing.T) { + // Test that leaf iter does nothing (it's a no-op) + leaf := newLeaf(b("test"), 42) + called := false + + // Call iter with a function that would set called to true + leaf.iter(func(n node) bool { + called = true + return true + }) + require_False(t, called) // Should never be called since leaf.iter is a no-op + + // Call iter again with a function that returns false + leaf.iter(func(n node) 
bool { + called = true + return false + }) + require_False(t, called) // Still should never be called + + // Verify the leaf itself is not affected + require_True(t, leaf.match(b("test"))) + require_Equal(t, leaf.value, 42) + + // Also test through the node interface to ensure coverage + var n node = leaf + called = false + n.iter(func(child node) bool { + called = true + return true + }) + require_False(t, called) // Still should never be called +} + +func TestDeleteChildEdgeCasesMore(t *testing.T) { + // Test the edge case in node10 deleteChild where we don't swap (last element) + n10 := newNode10(b("prefix")) + n10.addChild('a', newLeaf(b("1"), 1)) + n10.addChild('b', newLeaf(b("2"), 2)) + n10.addChild('c', newLeaf(b("3"), 3)) + + // Delete the last child + n10.deleteChild('c') + require_Equal(t, n10.size, uint16(2)) + + // Test the edge case in node16 deleteChild where we don't swap (last element) + n16 := newNode16(b("prefix")) + n16.addChild('a', newLeaf(b("1"), 1)) + n16.addChild('b', newLeaf(b("2"), 2)) + n16.addChild('c', newLeaf(b("3"), 3)) + + // Delete the last child + n16.deleteChild('c') + require_Equal(t, n16.size, uint16(2)) +} + +func TestMatchPartsMoreEdgeCases(t *testing.T) { + // Test the remaining 2.6% of matchParts + // Case where frag is empty + parts := genParts(b("foo.*"), nil) + remaining, matched := matchParts(parts, b("")) + require_True(t, matched) + require_Equal(t, len(remaining), len(parts)) +} + +func TestInsertComplexEdgeCases(t *testing.T) { + st := NewSubjectTree[int]() + + // Test the recursive insert case with same pivot + // This requires a very specific setup + // First, create a tree structure that will trigger the recursive path + st.Insert(b("a"), 1) + st.Insert(b("aa"), 2) // This will create a split + + // Now insert something that has the same pivot after split + st.Insert(b("aaa"), 3) // This should trigger the recursive insert path + + require_Equal(t, st.Size(), 3) + + // Verify all values can be found + v, found := 
st.Find(b("a")) + require_True(t, found) + require_Equal(t, *v, 1) + + v, found = st.Find(b("aa")) + require_True(t, found) + require_Equal(t, *v, 2) + + v, found = st.Find(b("aaa")) + require_True(t, found) + require_Equal(t, *v, 3) +} + +func TestDeleteNilNodePointer(t *testing.T) { + st := NewSubjectTree[int]() + // Test delete with nil node + var n node + val, deleted := st.delete(&n, b("foo"), 0) + require_False(t, deleted) + require_True(t, val == nil) +} + +func TestMatchComplexEdgeCases(t *testing.T) { + st := NewSubjectTree[int]() + + // Build a complex tree to test the 2.2% uncovered in match + st.Insert(b("foo.bar.baz"), 1) + st.Insert(b("foo.bar.qux"), 2) + st.Insert(b("foo.baz.bar"), 3) + st.Insert(b("bar.foo.baz"), 4) + + // Test with terminal fwc but no remaining parts + count := 0 + st.Match(b("foo.bar.>"), func(subject []byte, val *int) { + count++ + }) + require_Equal(t, count, 2) +} + +func TestIterComplexTree(t *testing.T) { + st := NewSubjectTree[int]() + + // Build a deeper tree to test the remaining iter cases + for i := 0; i < 20; i++ { + st.Insert([]byte(fmt.Sprintf("level1.level2.level3.item%d", i)), i) + } + + // This should create multiple node types and test more paths + count := 0 + st.IterOrdered(func(subject []byte, val *int) bool { + count++ + return true + }) + require_Equal(t, count, 20) +} diff --git a/server/subject_transform.go b/server/subject_transform.go index 42cc17e0672..4502d508f23 100644 --- a/server/subject_transform.go +++ b/server/subject_transform.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/subject_transform_test.go b/server/subject_transform_test.go index 19a5bce7895..a1c62c54b31 100644 --- a/server/subject_transform_test.go +++ b/server/subject_transform_test.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/sublist_test.go b/server/sublist_test.go index f7104dac58e..acc5b53af25 100644 --- a/server/sublist_test.go +++ b/server/sublist_test.go @@ -1,4 +1,4 @@ -// Copyright 2016-2024 The NATS Authors +// Copyright 2016-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/sysmem/mem_bsd.go b/server/sysmem/mem_bsd.go index 6cc63b1d1ad..46aaf994c7d 100644 --- a/server/sysmem/mem_bsd.go +++ b/server/sysmem/mem_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2019-2021 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/sysmem/mem_darwin.go b/server/sysmem/mem_darwin.go index f8e049b9a81..e2dee2877ea 100644 --- a/server/sysmem/mem_darwin.go +++ b/server/sysmem/mem_darwin.go @@ -1,4 +1,4 @@ -// Copyright 2019-2021 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/sysmem/mem_linux.go b/server/sysmem/mem_linux.go index 26e0bd1525c..2d15e5b3b28 100644 --- a/server/sysmem/mem_linux.go +++ b/server/sysmem/mem_linux.go @@ -1,4 +1,4 @@ -// Copyright 2019-2021 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/sysmem/mem_wasm.go b/server/sysmem/mem_wasm.go index bbc43af7fef..bde3586e56f 100644 --- a/server/sysmem/mem_wasm.go +++ b/server/sysmem/mem_wasm.go @@ -1,4 +1,4 @@ -// Copyright 2022 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/sysmem/mem_windows.go b/server/sysmem/mem_windows.go index 3f070887d28..9fcb0eafac9 100644 --- a/server/sysmem/mem_windows.go +++ b/server/sysmem/mem_windows.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/sysmem/mem_zos.go b/server/sysmem/mem_zos.go index cc57620e855..f8db5c5c77d 100644 --- a/server/sysmem/mem_zos.go +++ b/server/sysmem/mem_zos.go @@ -1,4 +1,4 @@ -// Copyright 2022-2023 The NATS Authors +// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/server/sysmem/sysctl.go b/server/sysmem/sysctl.go index 550961ae10b..fd6a41d6ddc 100644 --- a/server/sysmem/sysctl.go +++ b/server/sysmem/sysctl.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/trust_test.go b/server/trust_test.go index 63ed976a02e..951a9ca31ba 100644 --- a/server/trust_test.go +++ b/server/trust_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2022 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/util_test.go b/server/util_test.go index b2baffadff6..4138010dadc 100644 --- a/server/util_test.go +++ b/server/util_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2022 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/server/websocket_test.go b/server/websocket_test.go index c6921001bda..9ffc81e0710 100644 --- a/server/websocket_test.go +++ b/server/websocket_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/test/accounts_cycles_test.go b/test/accounts_cycles_test.go index 37859b73572..e00aae162b7 100644 --- a/test/accounts_cycles_test.go +++ b/test/accounts_cycles_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/bench_test.go b/test/bench_test.go index b4fef672c6d..afd1cdcd17e 100644 --- a/test/bench_test.go +++ b/test/bench_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/client_auth_test.go b/test/client_auth_test.go index 2241dc19d78..711a7468ece 100644 --- a/test/client_auth_test.go +++ b/test/client_auth_test.go @@ -1,4 +1,4 @@ -// Copyright 2016-2022 The NATS Authors +// Copyright 2016-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/client_cluster_test.go b/test/client_cluster_test.go index d72568a33e1..ea4ba5522a2 100644 --- a/test/client_cluster_test.go +++ b/test/client_cluster_test.go @@ -1,4 +1,4 @@ -// Copyright 2013-2024 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/test/cluster_test.go b/test/cluster_test.go index a0eeb403285..eb5a4b28841 100644 --- a/test/cluster_test.go +++ b/test/cluster_test.go @@ -1,4 +1,4 @@ -// Copyright 2013-2024 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/cluster_tls_test.go b/test/cluster_tls_test.go index 3ce822a2cb4..cc363a33ff3 100644 --- a/test/cluster_tls_test.go +++ b/test/cluster_tls_test.go @@ -1,4 +1,4 @@ -// Copyright 2013-2024 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/fanout_test.go b/test/fanout_test.go index 048180ab5f3..a94a832f596 100644 --- a/test/fanout_test.go +++ b/test/fanout_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2022 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/gosrv_test.go b/test/gosrv_test.go index 4d20ae23c01..c06aaee3718 100644 --- a/test/gosrv_test.go +++ b/test/gosrv_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2019 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/test/leafnode_test.go b/test/leafnode_test.go index 3fb01902bab..75820ee2f66 100644 --- a/test/leafnode_test.go +++ b/test/leafnode_test.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/maxpayload_test.go b/test/maxpayload_test.go index 6883a5f1731..723ea2986b7 100644 --- a/test/maxpayload_test.go +++ b/test/maxpayload_test.go @@ -1,4 +1,4 @@ -// Copyright 2015-2020 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/norace_test.go b/test/norace_test.go index 12b171f6f91..2b2c7123ad6 100644 --- a/test/norace_test.go +++ b/test/norace_test.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/ocsp_peer_test.go b/test/ocsp_peer_test.go index a6b2e3269ba..7f89928acdc 100644 --- a/test/ocsp_peer_test.go +++ b/test/ocsp_peer_test.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/test/ocsp_test.go b/test/ocsp_test.go index 964cbf121da..01f96b585c5 100644 --- a/test/ocsp_test.go +++ b/test/ocsp_test.go @@ -1,4 +1,4 @@ -// Copyright 2021-2024 The NATS Authors +// Copyright 2021-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/operator_test.go b/test/operator_test.go index 83a47fde2b6..b621b5a136d 100644 --- a/test/operator_test.go +++ b/test/operator_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2023 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/opts_test.go b/test/opts_test.go index addd058fe22..111610df7f3 100644 --- a/test/opts_test.go +++ b/test/opts_test.go @@ -1,4 +1,4 @@ -// Copyright 2015-2020 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/pid_test.go b/test/pid_test.go index 1201ee2c500..4c530955a82 100644 --- a/test/pid_test.go +++ b/test/pid_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2022 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/test/ping_test.go b/test/ping_test.go index 3dfdeb781cc..ca19af591fc 100644 --- a/test/ping_test.go +++ b/test/ping_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2021 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/routes_test.go b/test/routes_test.go index 6d8ca2da1e2..e577a732387 100644 --- a/test/routes_test.go +++ b/test/routes_test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/service_latency_test.go b/test/service_latency_test.go index 4379fe9578b..19700c8a7e0 100644 --- a/test/service_latency_test.go +++ b/test/service_latency_test.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/services_test.go b/test/services_test.go index 9fff2c5a090..2aa8bce3be8 100644 --- a/test/services_test.go +++ b/test/services_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2022 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/test/system_services_test.go b/test/system_services_test.go index d3e493f1ab0..c4f2470eaaa 100644 --- a/test/system_services_test.go +++ b/test/system_services_test.go @@ -1,4 +1,4 @@ -// Copyright 2019-2023 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/test.go b/test/test.go index 27dada57655..183e7ce80f3 100644 --- a/test/test.go +++ b/test/test.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/test_test.go b/test/test_test.go index 45b790e6790..4e72b42bbd4 100644 --- a/test/test_test.go +++ b/test/test_test.go @@ -1,4 +1,4 @@ -// Copyright 2016-2024 The NATS Authors +// Copyright 2016-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/test/tls_test.go b/test/tls_test.go index 7fc56c8a003..60723fcbbf4 100644 --- a/test/tls_test.go +++ b/test/tls_test.go @@ -1,4 +1,4 @@ -// Copyright 2015-2024 The NATS Authors +// Copyright 2015-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/test/user_authorization_test.go b/test/user_authorization_test.go index 5ada13bd134..feca53efd7f 100644 --- a/test/user_authorization_test.go +++ b/test/user_authorization_test.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020 The NATS Authors +// Copyright 2016-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at