Skip to content

Commit

Permalink
Use uint64 instead of uint32
Browse files Browse the repository at this point in the history
There is a possibility that we run into a problem of int32 overflow.
To prevent this let's use uint64 everywhere.

https://github.com/allegro/bigcache/blob/21e5ca5c3d539f94e8dc563350acd97c5400154f/shard.go#L138

Fixes: allegro#148
  • Loading branch information
janisz committed Aug 21, 2020
1 parent 21e5ca5 commit 69e0c58
Show file tree
Hide file tree
Showing 2 changed files with 34 additions and 33 deletions.
57 changes: 29 additions & 28 deletions queue/bytes_queue.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,23 +24,23 @@ var (
// BytesQueue is a FIFO queue backed by a single byte slice. Each entry is
// stored as a uvarint length header followed by its payload. All positional
// fields use uint64 so that queues larger than 4 GiB cannot overflow the
// index arithmetic (allegro/bigcache#148).
type BytesQueue struct {
	full            bool   // set when the tail has caught up with the head and no gap remains
	array           []byte // backing storage for headers and payloads
	capacity        uint64 // current length of array
	maxCapacity     uint64 // growth limit in bytes; 0 disables the limit (see Push)
	head            uint64 // index of the oldest entry
	tail            uint64 // index where the next entry will be written
	count           int    // number of entries currently in the queue
	rightMargin     uint64 // rightmost used index; presumably marks where wrapped data ends — TODO confirm
	headerBuffer    []byte // scratch space for encoding uvarint entry headers
	verbose         bool   // when true, memory allocations are logged (see allocateAdditionalMemory)
	initialCapacity uint64 // capacity the queue was created with
}

// queueError is the error type returned by queue operations (e.g. when
// Push hits the maximum size limit); message holds the human-readable text.
type queueError struct {
	message string
}

// getUvarintSize returns the number of bytes to encode x in uvarint format
func getUvarintSize(x uint32) int {
func getUvarintSize(x uint32) uint64 {
if x < 128 {
return 1
} else if x < 16384 {
Expand All @@ -60,14 +60,14 @@ func getUvarintSize(x uint32) int {
// NewBytesQueue initializes a new bytes queue with initialCapacity bytes of
// backing storage. maxCapacity bounds future growth (0 means unbounded).
// When verbose is set, information about memory allocations is printed.
func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
	return &BytesQueue{
		array:        make([]byte, initialCapacity),
		capacity:     uint64(initialCapacity),
		maxCapacity:  uint64(maxCapacity),
		headerBuffer: make([]byte, binary.MaxVarintLen32),
		// head, tail and rightMargin start past the left margin; index 0 is
		// reserved so that 0 can never be a valid entry position.
		tail:            leftMarginIndex,
		head:            leftMarginIndex,
		rightMargin:     leftMarginIndex,
		verbose:         verbose,
		initialCapacity: uint64(initialCapacity),
	}
}

Expand All @@ -84,14 +84,14 @@ func (q *BytesQueue) Reset() {
// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed.
// Returns index for pushed data or error if maximum size queue limit is reached.
func (q *BytesQueue) Push(data []byte) (int, error) {
dataLen := len(data)
dataLen := uint64(len(data))
headerEntrySize := getUvarintSize(uint32(dataLen))

if !q.canInsertAfterTail(dataLen + headerEntrySize) {
if q.canInsertBeforeHead(dataLen + headerEntrySize) {
q.tail = leftMarginIndex
} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
return -1, &queueError{"Full queue. Maximum size limit reached."}
return 0, &queueError{"Full queue. Maximum size limit reached."}
} else {
q.allocateAdditionalMemory(dataLen + headerEntrySize)
}
Expand All @@ -101,10 +101,10 @@ func (q *BytesQueue) Push(data []byte) (int, error) {

q.push(data, dataLen)

return index, nil
return int(index), nil
}

func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
func (q *BytesQueue) allocateAdditionalMemory(minimum uint64) {
start := time.Now()
if q.capacity < minimum {
q.capacity += minimum
Expand Down Expand Up @@ -136,8 +136,8 @@ func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
}
}

func (q *BytesQueue) push(data []byte, len int) {
headerEntrySize := binary.PutUvarint(q.headerBuffer, uint64(len))
func (q *BytesQueue) push(data []byte, len uint64) {
headerEntrySize := uint64(binary.PutUvarint(q.headerBuffer, len))
q.copy(q.headerBuffer, headerEntrySize)

q.copy(data, len)
Expand All @@ -152,8 +152,8 @@ func (q *BytesQueue) push(data []byte, len int) {
q.count++
}

func (q *BytesQueue) copy(data []byte, len int) {
q.tail += copy(q.array[q.tail:], data[:len])
func (q *BytesQueue) copy(data []byte, len uint64) {
q.tail += uint64(copy(q.array[q.tail:], data[:len]))
}

// Pop reads the oldest entry from queue and moves head pointer to the next one
Expand All @@ -162,7 +162,7 @@ func (q *BytesQueue) Pop() ([]byte, error) {
if err != nil {
return nil, err
}
size := len(data)
size := uint64(len(data))

q.head += headerEntrySize + size
q.count--
Expand All @@ -188,18 +188,18 @@ func (q *BytesQueue) Peek() ([]byte, error) {

// Get reads the entry stored at index. A negative index converts to a huge
// uint64 and is rejected by peek's bounds check rather than panicking.
func (q *BytesQueue) Get(index int) ([]byte, error) {
	data, _, err := q.peek(uint64(index))
	return data, err
}

// CheckGet reports (via the returned error) whether an entry can be read
// from index, without reading it.
func (q *BytesQueue) CheckGet(index int) error {
	return q.peekCheckErr(uint64(index))
}

// Capacity returns the number of bytes currently allocated for the queue.
// The internal counter is uint64; the int return type is kept for backward
// compatibility with existing callers.
func (q *BytesQueue) Capacity() int {
	return int(q.capacity)
}

// Len returns number of entries kept in queue
Expand All @@ -213,7 +213,7 @@ func (e *queueError) Error() string {
}

// peekCheckErr is identical to peek, but does not actually return any data
func (q *BytesQueue) peekCheckErr(index int) error {
func (q *BytesQueue) peekCheckErr(index uint64) error {

if q.count == 0 {
return errEmptyQueue
Expand All @@ -223,25 +223,26 @@ func (q *BytesQueue) peekCheckErr(index int) error {
return errInvalidIndex
}

if index >= len(q.array) {
if index >= uint64(len(q.array)) {
return errIndexOutOfBounds
}
return nil
}

// peek returns the data from index and the number of bytes to encode the length of the data in uvarint format
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
func (q *BytesQueue) peek(index uint64) ([]byte, uint64, error) {
err := q.peekCheckErr(index)
if err != nil {
return nil, 0, err
}

blockSize, n := binary.Uvarint(q.array[index:])
return q.array[index+n : index+n+int(blockSize)], n, nil
un := uint64(n)
return q.array[index+un : index+un+blockSize], un, nil
}

// canInsertAfterTail returns true if it's possible to insert an entry of size of need after the tail of the queue
func (q *BytesQueue) canInsertAfterTail(need int) bool {
func (q *BytesQueue) canInsertAfterTail(need uint64) bool {
if q.full {
return false
}
Expand All @@ -256,7 +257,7 @@ func (q *BytesQueue) canInsertAfterTail(need int) bool {
}

// canInsertBeforeHead returns true if it's possible to insert an entry of size of need before the head of the queue
func (q *BytesQueue) canInsertBeforeHead(need int) bool {
func (q *BytesQueue) canInsertBeforeHead(need uint64) bool {
if q.full {
return false
}
Expand Down
10 changes: 5 additions & 5 deletions shard.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ type Metadata struct {
}

type cacheShard struct {
hashmap map[uint64]uint32
hashmap map[uint64]uint64
entries queue.BytesQueue
lock sync.RWMutex
entryBuffer []byte
Expand Down Expand Up @@ -135,7 +135,7 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {

for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.hashmap[hashedKey] = uint64(index)
s.lock.Unlock()
return nil
}
Expand Down Expand Up @@ -163,7 +163,7 @@ func (s *cacheShard) setWithoutLock(key string, hashedKey uint64, entry []byte)

for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.hashmap[hashedKey] = uint64(index)
return nil
}
if s.removeOldestEntry(NoSpace) != nil {
Expand Down Expand Up @@ -310,7 +310,7 @@ func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {

func (s *cacheShard) reset(config Config) {
s.lock.Lock()
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
s.hashmap = make(map[uint64]uint64, config.initialShardSize())
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
s.entries.Reset()
s.lock.Unlock()
Expand Down Expand Up @@ -390,7 +390,7 @@ func (s *cacheShard) collision() {

func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
return &cacheShard{
hashmap: make(map[uint64]uint32, config.initialShardSize()),
hashmap: make(map[uint64]uint64, config.initialShardSize()),
hashmapStats: make(map[uint64]uint32, config.initialShardSize()),
entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
Expand Down

0 comments on commit 69e0c58

Please sign in to comment.