Skip to content

Commit c8ccbcd

Browse files
rhys (hiltner)
authored and gopherbot committed
runtime: add direct benchmark of mutex contention
Measure throughput of a single mutex with all threads contending. Do not
attempt to measure fairness/starvation.

The ChanContended benchmark works somewhat well for this (interacting with
the mutex is a large contributor to its results), but it's better to be
clear about what we're attempting to measure.

For #68578

Change-Id: Ie397b4c363bfcd5afddf796a81cd6c34ebf8551b
Reviewed-on: https://go-review.googlesource.com/c/go/+/604375
Reviewed-by: David Chase <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
Auto-Submit: Rhys Hiltner <[email protected]>
1 parent 820d445 commit c8ccbcd

File tree

1 file changed

+45
-3
lines changed

1 file changed

+45
-3
lines changed

src/runtime/runtime_test.go

+45-3
Original file line numberDiff line numberDiff line change
@@ -564,6 +564,48 @@ func BenchmarkOSYield(b *testing.B) {
564564
}
565565
}
566566

567+
func BenchmarkMutexContention(b *testing.B) {
568+
// Measure throughput of a single mutex with all threads contending
569+
//
570+
// Share a single counter across all threads. Progress from any thread is
571+
// progress for the benchmark as a whole. We don't measure or give points
572+
// for fairness here, arbitrary delay to any given thread's progress is
573+
// invisible and allowed.
574+
//
575+
// The cache line that holds the count value will need to move between
576+
// processors, but not as often as the cache line that holds the mutex. The
577+
// mutex protects access to the count value, which limits contention on that
578+
// cache line. This is a simple design, but it helps to make the behavior of
579+
// the benchmark clear. Most real uses of mutex will protect some number of
580+
// cache lines anyway.
581+
582+
var state struct {
583+
_ cpu.CacheLinePad
584+
lock Mutex
585+
_ cpu.CacheLinePad
586+
count atomic.Int64
587+
_ cpu.CacheLinePad
588+
}
589+
590+
procs := GOMAXPROCS(0)
591+
var wg sync.WaitGroup
592+
for range procs {
593+
wg.Add(1)
594+
go func() {
595+
defer wg.Done()
596+
for {
597+
Lock(&state.lock)
598+
ours := state.count.Add(1)
599+
Unlock(&state.lock)
600+
if ours >= int64(b.N) {
601+
return
602+
}
603+
}
604+
}()
605+
}
606+
wg.Wait()
607+
}
608+
567609
func BenchmarkMutexHandoff(b *testing.B) {
568610
testcase := func(delay func(l *Mutex)) func(b *testing.B) {
569611
return func(b *testing.B) {
@@ -590,11 +632,11 @@ func BenchmarkMutexHandoff(b *testing.B) {
590632
// each other in a non-blocking way via the "turn" state.
591633

592634
var state struct {
593-
_ [cpu.CacheLinePadSize]byte
635+
_ cpu.CacheLinePad
594636
lock Mutex
595-
_ [cpu.CacheLinePadSize]byte
637+
_ cpu.CacheLinePad
596638
turn atomic.Int64
597-
_ [cpu.CacheLinePadSize]byte
639+
_ cpu.CacheLinePad
598640
}
599641

600642
var delta atomic.Int64

0 commit comments

Comments
 (0)