Skip to content

Commit

Permalink
runtime: delay incrementing freeindex in malloc
Browse files Browse the repository at this point in the history
When the GC is scanning some memory (possibly conservatively),
finding a pointer, while concurrently another goroutine is
allocating an object at the same address as the found pointer, the
GC may see the pointer before the object and/or the heap bits are
initialized. This may cause the GC to see bad pointers and
possibly crash.

To prevent this, we arrange that the scanner can only see the
object as allocated after the object and the heap bits are
initialized. As the scanner uses the freeindex to determine if an
object is allocated, we delay the increment of freeindex until
after the initialization.

Currently, in some code paths, finding the next free index and
updating the free index to a new slot past it are coupled, so this
needs a small refactoring. In the new code, mspan.nextFreeIndex
returns the next free index but does not update it (although
allocCache is updated). mallocgc will update it at a later time.

Fixes #54596.

Change-Id: I6dd5ccf743f2d2c46a1ed67c6a8237fe09a71260
Reviewed-on: https://go-review.googlesource.com/c/go/+/427619
TryBot-Result: Gopher Robot <[email protected]>
Run-TryBot: Cherry Mui <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
  • Loading branch information
cherrymui authored and mknyszek committed Nov 11, 2022
1 parent fcd14bd commit bed2b7c
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 22 deletions.
40 changes: 23 additions & 17 deletions src/runtime/malloc.go
Original file line number Diff line number Diff line change
Expand Up @@ -813,24 +813,22 @@ retry:
// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
// nextFreeFast returns the next free object if one is quickly available,
// and the corresponding free index. Otherwise it returns 0, 0.
func nextFreeFast(s *mspan) (gclinkptr, uintptr) {
theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
if theBit < 64 {
result := s.freeindex + uintptr(theBit)
if result < s.nelems {
freeidx := result + 1
if freeidx%64 == 0 && freeidx != s.nelems {
return 0
}
s.allocCache >>= uint(theBit + 1)
s.freeindex = freeidx
// NOTE: s.freeindex is not updated for now (although allocCache
// is updated). mallocgc will update s.freeindex later after the
// memory is initialized.
s.allocCount++
return gclinkptr(result*s.elemsize + s.base())
return gclinkptr(result*s.elemsize + s.base()), result
}
}
return 0
return 0, 0
}

// nextFree returns the next free object from the cached span if one is available.
Expand All @@ -842,10 +840,10 @@ func nextFreeFast(s *mspan) gclinkptr {
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, freeIndex uintptr, shouldhelpgc bool) {
s = c.alloc[spc]
shouldhelpgc = false
freeIndex := s.nextFreeIndex()
freeIndex = s.nextFreeIndex()
if freeIndex == s.nelems {
// The span is full.
if uintptr(s.allocCount) != s.nelems {
Expand Down Expand Up @@ -953,6 +951,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// In some cases block zeroing can profitably (for latency reduction purposes)
// be delayed till preemption is possible; delayedZeroing tracks that state.
delayedZeroing := false
var freeidx uintptr
if size <= maxSmallSize {
if noscan && size < maxTinySize {
// Tiny allocator.
Expand Down Expand Up @@ -1012,9 +1011,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
// Allocate a new maxTinySize block.
span = c.alloc[tinySpanClass]
v := nextFreeFast(span)
var v gclinkptr
v, freeidx = nextFreeFast(span)
if v == 0 {
v, span, shouldhelpgc = c.nextFree(tinySpanClass)
v, span, freeidx, shouldhelpgc = c.nextFree(tinySpanClass)
}
x = unsafe.Pointer(v)
(*[2]uint64)(x)[0] = 0
Expand All @@ -1037,9 +1037,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
size = uintptr(class_to_size[sizeclass])
spc := makeSpanClass(sizeclass, noscan)
span = c.alloc[spc]
v := nextFreeFast(span)
var v gclinkptr
v, freeidx = nextFreeFast(span)
if v == 0 {
v, span, shouldhelpgc = c.nextFree(spc)
v, span, freeidx, shouldhelpgc = c.nextFree(spc)
}
x = unsafe.Pointer(v)
if needzero && span.needzero != 0 {
Expand All @@ -1051,7 +1052,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// For large allocations, keep track of zeroed state so that
// bulk zeroing can be happen later in a preemptible context.
span = c.allocLarge(size, noscan)
span.freeindex = 1
freeidx = 0
span.allocCount = 1
size = span.elemsize
x = unsafe.Pointer(span.base())
Expand Down Expand Up @@ -1093,6 +1094,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// but see uninitialized memory or stale heap bits.
publicationBarrier()

// As x and the heap bits are initialized, update
// freeindex now so x is seen by the GC (including
// conservative scan) as an allocated object.
span.updateFreeIndex(freeidx + 1)

// Allocate black during GC.
// All slots hold nil so no scanning is needed.
// This may be racing with GC so do it atomically if there can be
Expand Down
17 changes: 13 additions & 4 deletions src/runtime/mbitmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,8 @@ func (s *mspan) refillAllocCache(whichByte uintptr) {
}

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// or after s.freeindex. s.freeindex is not updated (except in the
// full-span case), but the alloc cache is updated.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
Expand Down Expand Up @@ -170,9 +171,18 @@ func (s *mspan) nextFreeIndex() uintptr {
}

s.allocCache >>= uint(bitIndex + 1)
sfreeindex = result + 1

if sfreeindex%64 == 0 && sfreeindex != snelems {
// NOTE: s.freeindex is not updated for now (although allocCache
// is updated). mallocgc will update s.freeindex later after the
// memory is initialized.

return result
}

// updateFreeIndex updates s.freeindex to sfreeindex, and refills
// the allocCache if necessary.
func (s *mspan) updateFreeIndex(sfreeindex uintptr) {
if sfreeindex%64 == 0 && sfreeindex != s.nelems {
// We just incremented s.freeindex so it isn't 0.
// As each 1 in s.allocCache was encountered and used for allocation
// it was shifted away. At this point s.allocCache contains all 0s.
Expand All @@ -182,7 +192,6 @@ func (s *mspan) nextFreeIndex() uintptr {
s.refillAllocCache(whichByte)
}
s.freeindex = sfreeindex
return result
}

// isFree reports whether the index'th object in s is unallocated.
Expand Down
1 change: 0 additions & 1 deletion src/runtime/mcentral.go
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,6 @@ func (c *mcentral) cacheSpan() *mspan {
// Check if there's any free space.
freeIndex := s.nextFreeIndex()
if freeIndex != s.nelems {
s.freeindex = freeIndex
sweep.active.end(sl)
goto havespan
}
Expand Down

0 comments on commit bed2b7c

Please sign in to comment.