69 changes: 48 additions & 21 deletions server/filestore.go
@@ -4455,9 +4455,9 @@ func (fs *fileStore) EraseMsg(seq uint64) (bool, error) {

// Convenience function to remove per subject tracking at the filestore level.
// Lock should be held.
func (fs *fileStore) removePerSubject(subj string) {
func (fs *fileStore) removePerSubject(subj string) uint64 {
if len(subj) == 0 || fs.psim == nil {
return
return 0
}
// We do not update sense of fblk here but will do so when we resolve during lookup.
bsubj := stringToBytes(subj)
@@ -4468,9 +4468,12 @@ func (fs *fileStore) removePerSubject(subj string) {
} else if info.total == 0 {
if _, ok = fs.psim.Delete(bsubj); ok {
fs.tsl -= len(subj)
return 0
}
}
return info.total
}
return 0
}

// Remove a message, optionally rewriting the mb file.
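The new return value from removePerSubject is the number of messages left for that subject across the stream, with 0 meaning the subject has been dropped from the PSIM; PurgeEx uses this below to stop scanning early. A minimal sketch of that contract, using a hypothetical subjectIndex map in place of the real psim structure:

```go
// Minimal sketch (not the filestore implementation): a per-subject counter
// whose remove operation reports how many messages remain for the subject,
// so callers can stop scanning once it reaches zero.
package main

import "fmt"

type subjectIndex struct{ totals map[string]uint64 }

// remove mirrors the removePerSubject contract sketched above: decrement the
// tracked total and return what is left; 0 means the subject is gone.
func (si *subjectIndex) remove(subj string) uint64 {
	total, ok := si.totals[subj]
	if !ok {
		return 0
	}
	if total <= 1 {
		delete(si.totals, subj)
		return 0
	}
	si.totals[subj] = total - 1
	return total - 1
}

func main() {
	si := &subjectIndex{totals: map[string]uint64{"foo.bar": 2}}
	fmt.Println(si.remove("foo.bar")) // 1: one message left for this subject
	fmt.Println(si.remove("foo.bar")) // 0: subject fully removed
}
```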
@@ -5504,6 +5507,14 @@ func (mb *msgBlock) writeTombstone(seq uint64, ts int64) error {
return mb.writeMsgRecord(emptyRecordLen, seq|tbit, _EMPTY_, nil, nil, ts, true)
}

// Helper function to place a delete tombstone without flush.
// Lock should not be held.
func (mb *msgBlock) writeTombstoneNoFlush(seq uint64, ts int64) error {
mb.mu.Lock()
defer mb.mu.Unlock()
return mb.writeMsgRecordLocked(emptyRecordLen, seq|tbit, _EMPTY_, nil, nil, ts, false, false)
}

// Will write the message record to the underlying message block.
// filestore lock will be held.
func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte, ts int64, flush bool) error {
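The tombstone records above reuse the normal record format but OR a marker bit into the sequence (seq|tbit). A small sketch of that flag-bit pattern; the bit position used here is an assumption, not the real constant:

```go
// Sketch of the seq|tbit flag pattern; tbitSketch is a hypothetical stand-in
// for the real tombstone bit, whose exact value is not shown in this diff.
package main

import "fmt"

const tbitSketch = uint64(1) << 62 // assumed high-bit marker

func main() {
	seq := uint64(12345)
	stored := seq | tbitSketch            // what a tombstone record would carry
	isTombstone := stored&tbitSketch != 0 // readers can recognize the marker
	original := stored &^ tbitSketch      // and recover the original sequence
	fmt.Println(isTombstone, original)    // true 12345
}
```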
@@ -5743,10 +5754,13 @@ func (fs *fileStore) checkLastBlock(rl uint64) (lmb *msgBlock, err error) {
lmb = fs.lmb
rbytes := lmb.blkSize()
if lmb == nil || (rbytes > 0 && rbytes+rl > fs.fcfg.BlockSize) {
if lmb != nil && fs.fcfg.Compression != NoCompression {
// We've now reached the end of this message block, if we want
// to compress blocks then now's the time to do it.
go lmb.recompressOnDiskIfNeeded()
if lmb != nil {
lmb.flushPendingMsgs()
if fs.fcfg.Compression != NoCompression {
// We've now reached the end of this message block, if we want
// to compress blocks then now's the time to do it.
go lmb.recompressOnDiskIfNeeded()
}
}
if lmb, err = fs.newMsgBlockForWrite(); err != nil {
return nil, err
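The change above makes block rotation flush the outgoing last block's pending writes before it is handed to recompression and a fresh write block is allocated. A sketch of that ordering with stand-in types (the real method also respects fs.fcfg.BlockSize and the Compression setting):

```go
// Order-of-operations sketch for rotating the last message block: flush
// buffered writes first so nothing is lost, then recompress the old block
// (done in a goroutine in the real code), then start a new write block.
package main

import "fmt"

type block struct{ pending []byte }

func (b *block) flushPending() { fmt.Printf("flushed %d pending bytes\n", len(b.pending)); b.pending = nil }
func (b *block) recompress()   { fmt.Println("recompressing old block on disk") }

func rotate(old *block) *block {
	if old != nil {
		old.flushPending() // the step added in this diff
		old.recompress()   // previously the only step, skipped when compression is off
	}
	return &block{} // stand-in for newMsgBlockForWrite
}

func main() {
	lmb := &block{pending: []byte("buffered records")}
	lmb = rotate(lmb)
	_ = lmb
}
```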
@@ -5792,18 +5806,12 @@ func (fs *fileStore) writeTombstone(seq uint64, ts int64) error {
// This version does not flush contents.
// Lock should be held.
func (fs *fileStore) writeTombstoneNoFlush(seq uint64, ts int64) error {
// Grab our current last message block.
olmb := fs.lmb
lmb, err := fs.checkLastBlock(emptyRecordLen)
if err != nil {
return err
}
// If we swapped out our lmb, flush any pending.
if olmb != lmb {
olmb.flushPendingMsgs()
}
// Write tombstone without flush or kick.
return lmb.writeTombstone(seq, ts)
return lmb.writeTombstoneNoFlush(seq, ts)
}

func (mb *msgBlock) recompressOnDiskIfNeeded() error {
@@ -7584,6 +7592,11 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint
}
}

// Make sure to not leave subject empty if we reach this spot.
if subject == _EMPTY_ {
subject = fwcs
}

eq, wc := compareFn(subject), subjectHasWildcard(subject)
var firstSeqNeedsUpdate bool
var bytes uint64
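With no subject filter given, the purge is treated as matching everything: the empty subject is replaced with the full wildcard (fwcs, presumably ">") before the match function and wildcard flag are derived. A small sketch of that normalization with stand-in helpers:

```go
// Sketch of normalizing an empty purge filter to the full wildcard, assuming
// fwcs is ">"; strings.ContainsAny stands in for subjectHasWildcard.
package main

import (
	"fmt"
	"strings"
)

func normalizeFilter(subject string) (subj string, wildcard bool) {
	if subject == "" {
		subject = ">" // assumed value of fwcs
	}
	return subject, strings.ContainsAny(subject, "*>")
}

func main() {
	subj, wc := normalizeFilter("")
	fmt.Println(subj, wc) // > true
}
```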
@@ -7632,6 +7645,8 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint
shouldExpire = true
}

var nrg uint64 // Number of remaining messages globally after removal from psim.

for seq, te := f, len(tombs); seq <= l; seq++ {
if sm, _ := mb.cacheLookupNoCopy(seq, &smv); sm != nil && eq(sm.subj, subject) {
rl := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg)
@@ -7655,8 +7670,8 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint
bytes += rl
}
// PSIM and FSS updates.
mb.removeSeqPerSubject(sm.subj, seq)
fs.removePerSubject(sm.subj)
nr := mb.removeSeqPerSubject(sm.subj, seq)
nrg = fs.removePerSubject(sm.subj)

// Track tombstones we need to write.
tombs = append(tombs, msgId{sm.seq, sm.ts})
@@ -7686,6 +7701,11 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint
if mb.isEmpty() || (maxp > 0 && purged >= maxp) {
break
}
// Also break if we know we have no more messages matching here.
// This is only applicable for non-wildcarded filters.
if !wc && nr == 0 {
break
}
}
}
// Expire if we were responsible for loading and we do not seem to be doing successive purgeEx calls.
@@ -7700,6 +7720,10 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint
if maxp > 0 && purged >= maxp {
break
}
// Also check if not wildcarded and we have no remaining matches.
if !wc && nrg == 0 {
break
}
}
if firstSeqNeedsUpdate {
fs.selectNextFirst()
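For a literal (non-wildcard) filter, the per-block count (nr) and the stream-wide count (nrg) returned by the two remove helpers let both loops stop as soon as every matching message has been purged, instead of scanning every remaining block. A toy sketch of that early-exit shape:

```go
// Toy sketch of the nested early exit: once a literal filter's remaining
// count hits zero, the per-block scan and the outer block loop both stop,
// analogous to the !wc && nr == 0 and !wc && nrg == 0 checks above.
package main

import "fmt"

func main() {
	blocks := [][]string{{"foo", "bar"}, {"foo", "baz"}, {"bar"}}
	filter, remaining := "foo", 2 // literal filter with 2 matches stream-wide
	purged, scannedBlocks := 0, 0
	for _, blk := range blocks {
		scannedBlocks++
		for _, subj := range blk {
			if subj != filter {
				continue
			}
			purged++
			remaining--
			if remaining == 0 {
				break // nothing left to purge in this block
			}
		}
		if remaining == 0 {
			break // nothing left to purge in later blocks either
		}
	}
	fmt.Println(purged, scannedBlocks) // 2 2: the third block was never visited
}
```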
Expand All @@ -7716,6 +7740,7 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint
return purged, err
}
}
// Flush any pending. If we change blocks the checkLastBlock() will flush any pending for us.
if lmb := fs.lmb; lmb != nil {
lmb.flushPendingMsgs()
}
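The tombstones recorded during the scan are written with the NoFlush variant, so a single flush of the current last block here makes the whole batch durable (and checkLastBlock already flushed any block that was rotated mid-run). A sketch of the write-many-then-flush-once pattern, with bufio standing in for the block's write cache:

```go
// Sketch of batching buffered writes and flushing once at the end; bufio is
// a stand-in for the message block's write cache, not the real mechanism.
package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	w := bufio.NewWriter(os.Stdout) // stand-in for the last message block
	for seq := 1; seq <= 3; seq++ {
		// analogous to writeTombstoneNoFlush: buffered, not yet flushed
		fmt.Fprintf(w, "tombstone %d\n", seq)
	}
	// analogous to the final flushPendingMsgs above
	if err := w.Flush(); err != nil {
		panic(err)
	}
}
```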
@@ -8364,20 +8389,20 @@ func (mb *msgBlock) dirtyCloseWithRemove(remove bool) error {

// Remove a seq from the fss and select new first.
// Lock should be held.
func (mb *msgBlock) removeSeqPerSubject(subj string, seq uint64) {
func (mb *msgBlock) removeSeqPerSubject(subj string, seq uint64) uint64 {
mb.ensurePerSubjectInfoLoaded()
if mb.fss == nil {
return
return 0
}
bsubj := stringToBytes(subj)
ss, ok := mb.fss.Find(bsubj)
if !ok || ss == nil {
return
return 0
}

if ss.Msgs == 1 {
mb.fss.Delete(bsubj)
return
return 0
}

ss.Msgs--
@@ -8387,18 +8412,20 @@ func (mb *msgBlock) removeSeqPerSubject(subj string, seq uint64) {
if !ss.lastNeedsUpdate && seq != ss.Last {
ss.First = ss.Last
ss.firstNeedsUpdate = false
return
return 1
}
if !ss.firstNeedsUpdate && seq != ss.First {
ss.Last = ss.First
ss.lastNeedsUpdate = false
return
return 1
}
}

// We can lazily calculate the first/last sequence when needed.
ss.firstNeedsUpdate = seq == ss.First || ss.firstNeedsUpdate
ss.lastNeedsUpdate = seq == ss.Last || ss.lastNeedsUpdate

return ss.Msgs
}

// Will recalculate the first and/or last sequence for this subject in this block.
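removeSeqPerSubject mirrors the filestore-level change (0 when the subject no longer has messages tracked in this block, otherwise how many remain), and it keeps the existing lazy first/last bookkeeping: removing a boundary sequence only marks that bound stale for later recalculation. A toy sketch of that bookkeeping, not the real per-subject state type:

```go
// Toy sketch of lazy first/last maintenance for a per-subject range: removal
// decrements the count and marks a bound stale only when the removed
// sequence was that bound, deferring recalculation until it is needed.
package main

import "fmt"

type subjRange struct {
	first, last uint64
	msgs        uint64
	firstStale  bool
	lastStale   bool
}

// remove returns how many messages remain for the subject in this block.
func (r *subjRange) remove(seq uint64) uint64 {
	if r.msgs <= 1 {
		r.msgs = 0
		return 0
	}
	r.msgs--
	r.firstStale = r.firstStale || seq == r.first
	r.lastStale = r.lastStale || seq == r.last
	return r.msgs
}

func main() {
	r := &subjRange{first: 10, last: 40, msgs: 4}
	fmt.Println(r.remove(10), r.firstStale) // 3 true: first bound needs recalculation
	fmt.Println(r.remove(25), r.lastStale)  // 2 false: interior removal leaves bounds alone
}
```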
2 changes: 1 addition & 1 deletion server/filestore_test.go
@@ -9102,7 +9102,7 @@ func TestFileStoreRemoveMsgBlockLast(t *testing.T) {

func TestFileStoreAllLastSeqs(t *testing.T) {
fs, err := newFileStore(
FileStoreConfig{StoreDir: t.TempDir()}, // Make block size small to test multiblock selections with maxSeq
FileStoreConfig{StoreDir: t.TempDir()},
StreamConfig{Name: "zzz", Subjects: []string{"*.*"}, MaxMsgsPer: 50, Storage: FileStorage})
require_NoError(t, err)
defer fs.Stop()
83 changes: 83 additions & 0 deletions server/jetstream_cluster_1_test.go
@@ -3804,6 +3804,89 @@ func TestJetStreamClusterPeerRemovalAndStreamReassignmentWithoutSpace(t *testing
streamCurrent(2)
}

func TestJetStreamClusterPeerRemovalAndServerBroughtBack(t *testing.T) {
// Speed up for this test
peerRemoveTimeout = 2 * time.Second
defer func() {
peerRemoveTimeout = peerRemoveTimeoutDefault
}()

c := createJetStreamClusterExplicit(t, "R5S", 5)
defer c.shutdown()

// Client based API
ml := c.leader()
nc, err := nats.Connect(ml.ClientURL(), nats.UserInfo("admin", "s3cr3t!"))
if err != nil {
t.Fatalf("Failed to create system client: %v", err)
}
defer nc.Close()

getPeersCount := func() int {
js := ml.getJetStream()
if js == nil {
return 0
}
js.mu.RLock()
defer js.mu.RUnlock()

cc := js.cluster
if !cc.isLeader() || cc.meta == nil {
return 0
}
return len(cc.meta.Peers())
}

checkFor(t, 2*time.Second, 250*time.Millisecond, func() error {
if l := getPeersCount(); l != 5 {
return fmt.Errorf("expected 5 peers, got %d", l)
}
return nil
})

// Shutdown server first.
rs := c.randomNonLeader()
rs.Shutdown()

// Peers should still remain the same, even if one server is shut down.
checkFor(t, 2*time.Second, 250*time.Millisecond, func() error {
if l := getPeersCount(); l != 5 {
return fmt.Errorf("expected 5 peers, got %d", l)
}
return nil
})

// Peer-remove after shutdown.
req := &JSApiMetaServerRemoveRequest{Server: rs.Name()}
jsreq, err := json.Marshal(req)
require_NoError(t, err)
rmsg, err := nc.Request(JSApiRemoveServer, jsreq, time.Second)
require_NoError(t, err)

var resp JSApiMetaServerRemoveResponse
require_NoError(t, json.Unmarshal(rmsg.Data, &resp))
if resp.Error != nil {
t.Fatalf("Unexpected error: %+v", resp.Error)
}

// Peer should be removed.
checkFor(t, 2*time.Second, 250*time.Millisecond, func() error {
if l := getPeersCount(); l != 4 {
return fmt.Errorf("expected 4 peers, got %d", l)
}
return nil
})

// Bringing the server back should re-add it to the peers after the peer-remove timeout.
c.restartServer(rs)
checkFor(t, 5*time.Second, 250*time.Millisecond, func() error {
if l := getPeersCount(); l != 5 {
return fmt.Errorf("expected 5 peers, got %d", l)
}
return nil
})
}

func TestJetStreamClusterPeerExclusionTag(t *testing.T) {
c := createJetStreamClusterWithTemplateAndModHook(t, jsClusterTempl, "C", 3,
func(serverName, clusterName, storeDir, conf string) string {
57 changes: 57 additions & 0 deletions server/jetstream_consumer_test.go
@@ -17,6 +17,7 @@
package server

import (
"context"
"encoding/json"
"errors"
"fmt"
@@ -30,6 +31,7 @@ import (
"time"

"github.com/nats-io/nats.go"
"github.com/nats-io/nats.go/jetstream"
"github.com/nats-io/nuid"
)

@@ -1760,3 +1762,58 @@ func TestJetStreamConsumerDeliveryCount(t *testing.T) {
require_Equal(t, o.deliveryCount(2), 1)

}

// https://github.com/nats-io/nats-server/issues/6824
func TestJetStreamConsumerDeliverAllOverlappingFilterSubjects(t *testing.T) {
s := RunBasicJetStreamServer(t)
defer s.Shutdown()

nc, js := jsClientConnectNewAPI(t, s)
defer nc.Close()

ctx := context.Background()
_, err := js.CreateOrUpdateStream(ctx, jetstream.StreamConfig{
Name: "TEST",
Subjects: []string{"stream.>"},
})
require_NoError(t, err)

publishMessageCount := 10
for i := 0; i < publishMessageCount; i++ {
_, err = js.Publish(ctx, "stream.A", nil)
require_NoError(t, err)
}

// Create consumer
consumer, err := js.CreateOrUpdateConsumer(ctx, "TEST", jetstream.ConsumerConfig{
DeliverPolicy: jetstream.DeliverAllPolicy,
FilterSubjects: []string{
"stream.A",
"stream.A.>",
},
})
require_NoError(t, err)

messages := make(chan jetstream.Msg)
cc, err := consumer.Consume(func(msg jetstream.Msg) {
messages <- msg
msg.Ack()
})
require_NoError(t, err)
defer cc.Drain()

var count = 0
for {
if count == publishMessageCount {
// All messages received.
return
}
select {
case <-messages:
count++
case <-time.After(2 * time.Second):
t.Errorf("Timeout reached, %d messages received. Exiting.", count)
return
}
}
}
14 changes: 14 additions & 0 deletions server/jetstream_helpers_test.go
@@ -34,6 +34,7 @@ import (
"time"

"github.com/nats-io/nats.go"
"github.com/nats-io/nats.go/jetstream"
"golang.org/x/time/rate"
)

@@ -1229,6 +1230,19 @@ func jsClientConnectEx(t testing.TB, s *Server, jsOpts []nats.JSOpt, opts ...nat
return nc, js
}

func jsClientConnectNewAPI(t testing.TB, s *Server, opts ...nats.Option) (*nats.Conn, jetstream.JetStream) {
t.Helper()
nc, err := nats.Connect(s.ClientURL(), opts...)
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
js, err := jetstream.New(nc, jetstream.WithDefaultTimeout(10*time.Second))
if err != nil {
t.Fatalf("Unexpected error getting JetStream context: %v", err)
}
return nc, js
}

func jsClientConnectURL(t testing.TB, url string, opts ...nats.Option) (*nats.Conn, nats.JetStreamContext) {
t.Helper()
