From 46cecc56ec02993483732698f49cfbe35ec6ee1c Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Fri, 21 Aug 2020 20:53:00 +0200 Subject: [PATCH] Use uint64 instead of uint32 There is a possibility we run into a problem of int32 overflow. To prevent this let's use uint64 everywhere. https://github.com/allegro/bigcache/blob/21e5ca5c3d539f94e8dc563350acd97c5400154f/shard.go#L138 Fixes: https://github.com/allegro/bigcache/issues/148 --- bigcache_148_test.go | 51 ++++++++++++++++++++++++++++++++++++++++++++ queue/bytes_queue.go | 51 ++++++++++++++++++++++---------------------- shard.go | 10 ++++----- 3 files changed, 82 insertions(+), 30 deletions(-) create mode 100644 bigcache_148_test.go diff --git a/bigcache_148_test.go b/bigcache_148_test.go new file mode 100644 index 00000000..5c681bd0 --- /dev/null +++ b/bigcache_148_test.go @@ -0,0 +1,51 @@ +package bigcache + +import ( + "bytes" + "strconv" + "testing" + "time" +) + +func Test_issue_148(t *testing.T) { + const n = 2070400 + var message = bytes.Repeat([]byte{0}, 2<<10) + cache, _ := NewBigCache(Config{ + Shards: 1, + LifeWindow: time.Hour, + MaxEntriesInWindow: 10, + MaxEntrySize: len(message), + HardMaxCacheSize: 2 << 13, + }) + for i := 0; i < n; i++ { + err := cache.Set(strconv.Itoa(i), message) + if err != nil { + t.Fatal(err) + } + } + + err := cache.Set(strconv.Itoa(n), message) + if err != nil { + t.Fatal(err) + } + + cache.Get(strconv.Itoa(n)) + + i := 0 + defer func() { + if r := recover(); r != nil { + t.Log("Element: ", i) + t.Fatal(r) + } + }() + + for ; i < n; i++ { + v, err := cache.Get(strconv.Itoa(i)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(v, message) { + t.Fatal("Should be equal", i, v, message) + } + } +} diff --git a/queue/bytes_queue.go b/queue/bytes_queue.go index 184c1e8b..be61fd6c 100644 --- a/queue/bytes_queue.go +++ b/queue/bytes_queue.go @@ -24,12 +24,12 @@ var ( type BytesQueue struct { full bool array []byte - capacity int - maxCapacity int - head int - tail int + 
capacity uint64 + maxCapacity uint64 + head uint64 + tail uint64 count int - rightMargin int + rightMargin uint64 headerBuffer []byte verbose bool } @@ -39,7 +39,7 @@ type queueError struct { } // getUvarintSize returns the number of bytes to encode x in uvarint format -func getUvarintSize(x uint32) int { +func getUvarintSize(x uint32) uint64 { if x < 128 { return 1 } else if x < 16384 { @@ -59,8 +59,8 @@ func getUvarintSize(x uint32) int { func NewBytesQueue(capacity int, maxCapacity int, verbose bool) *BytesQueue { return &BytesQueue{ array: make([]byte, capacity), - capacity: capacity, - maxCapacity: maxCapacity, + capacity: uint64(capacity), + maxCapacity: uint64(maxCapacity), headerBuffer: make([]byte, binary.MaxVarintLen32), tail: leftMarginIndex, head: leftMarginIndex, @@ -82,7 +82,7 @@ func (q *BytesQueue) Reset() { // Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed. // Returns index for pushed data or error if maximum size queue limit is reached. 
func (q *BytesQueue) Push(data []byte) (int, error) { - dataLen := len(data) + dataLen := uint64(len(data)) headerEntrySize := getUvarintSize(uint32(dataLen)) if !q.canInsertAfterTail(dataLen + headerEntrySize) { @@ -99,10 +99,10 @@ func (q *BytesQueue) Push(data []byte) (int, error) { q.push(data, dataLen) - return index, nil + return int(index), nil } -func (q *BytesQueue) allocateAdditionalMemory(minimum int) { +func (q *BytesQueue) allocateAdditionalMemory(minimum uint64) { start := time.Now() if q.capacity < minimum { q.capacity += minimum @@ -134,8 +134,8 @@ func (q *BytesQueue) allocateAdditionalMemory(minimum int) { } } -func (q *BytesQueue) push(data []byte, len int) { - headerEntrySize := binary.PutUvarint(q.headerBuffer, uint64(len)) +func (q *BytesQueue) push(data []byte, len uint64) { + headerEntrySize := uint64(binary.PutUvarint(q.headerBuffer, len)) q.copy(q.headerBuffer, headerEntrySize) q.copy(data, len) @@ -150,8 +150,8 @@ func (q *BytesQueue) push(data []byte, len int) { q.count++ } -func (q *BytesQueue) copy(data []byte, len int) { - q.tail += copy(q.array[q.tail:], data[:len]) +func (q *BytesQueue) copy(data []byte, len uint64) { + q.tail += uint64(copy(q.array[q.tail:], data[:len])) } // Pop reads the oldest entry from queue and moves head pointer to the next one @@ -160,7 +160,7 @@ func (q *BytesQueue) Pop() ([]byte, error) { if err != nil { return nil, err } - size := len(data) + size := uint64(len(data)) q.head += headerEntrySize + size q.count-- @@ -186,18 +186,18 @@ func (q *BytesQueue) Peek() ([]byte, error) { // Get reads entry from index func (q *BytesQueue) Get(index int) ([]byte, error) { - data, _, err := q.peek(index) + data, _, err := q.peek(uint64(index)) return data, err } // CheckGet checks if an entry can be read from index func (q *BytesQueue) CheckGet(index int) error { - return q.peekCheckErr(index) + return q.peekCheckErr(uint64(index)) } // Capacity returns number of allocated bytes for queue func (q *BytesQueue) 
Capacity() int { - return q.capacity + return int(q.capacity) } // Len returns number of entries kept in queue @@ -211,7 +211,7 @@ func (e *queueError) Error() string { } // peekCheckErr is identical to peek, but does not actually return any data -func (q *BytesQueue) peekCheckErr(index int) error { +func (q *BytesQueue) peekCheckErr(index uint64) error { if q.count == 0 { return errEmptyQueue @@ -221,25 +221,26 @@ func (q *BytesQueue) peekCheckErr(index int) error { return errInvalidIndex } - if index >= len(q.array) { + if index >= uint64(len(q.array)) { return errIndexOutOfBounds } return nil } // peek returns the data from index and the number of bytes to encode the length of the data in uvarint format -func (q *BytesQueue) peek(index int) ([]byte, int, error) { +func (q *BytesQueue) peek(index uint64) ([]byte, uint64, error) { err := q.peekCheckErr(index) if err != nil { return nil, 0, err } blockSize, n := binary.Uvarint(q.array[index:]) - return q.array[index+n : index+n+int(blockSize)], n, nil + un := uint64(n) + return q.array[index+un : index+un+blockSize], un, nil } // canInsertAfterTail returns true if it's possible to insert an entry of size of need after the tail of the queue -func (q *BytesQueue) canInsertAfterTail(need int) bool { +func (q *BytesQueue) canInsertAfterTail(need uint64) bool { if q.full { return false } @@ -254,7 +255,7 @@ func (q *BytesQueue) canInsertAfterTail(need int) bool { } // canInsertBeforeHead returns true if it's possible to insert an entry of size of need before the head of the queue -func (q *BytesQueue) canInsertBeforeHead(need int) bool { +func (q *BytesQueue) canInsertBeforeHead(need uint64) bool { if q.full { return false } diff --git a/shard.go b/shard.go index 1fd4c1a7..642a8047 100644 --- a/shard.go +++ b/shard.go @@ -16,7 +16,7 @@ type Metadata struct { } type cacheShard struct { - hashmap map[uint64]uint32 + hashmap map[uint64]uint64 entries queue.BytesQueue lock sync.RWMutex entryBuffer []byte @@ -135,7 +135,7 @@ 
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error { for { if index, err := s.entries.Push(w); err == nil { - s.hashmap[hashedKey] = uint32(index) + s.hashmap[hashedKey] = uint64(index) s.lock.Unlock() return nil } @@ -163,7 +163,7 @@ func (s *cacheShard) setWithoutLock(key string, hashedKey uint64, entry []byte) for { if index, err := s.entries.Push(w); err == nil { - s.hashmap[hashedKey] = uint32(index) + s.hashmap[hashedKey] = uint64(index) return nil } if s.removeOldestEntry(NoSpace) != nil { @@ -310,7 +310,7 @@ func (s *cacheShard) removeOldestEntry(reason RemoveReason) error { func (s *cacheShard) reset(config Config) { s.lock.Lock() - s.hashmap = make(map[uint64]uint32, config.initialShardSize()) + s.hashmap = make(map[uint64]uint64, config.initialShardSize()) s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes) s.entries.Reset() s.lock.Unlock() @@ -395,7 +395,7 @@ func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheS bytesQueueInitialCapacity = maximumShardSizeInBytes } return &cacheShard{ - hashmap: make(map[uint64]uint32, config.initialShardSize()), + hashmap: make(map[uint64]uint64, config.initialShardSize()), hashmapStats: make(map[uint64]uint32, config.initialShardSize()), entries: *queue.NewBytesQueue(bytesQueueInitialCapacity, maximumShardSizeInBytes, config.Verbose), entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),