From 4d69d2616ca57d32c22db7f7380cd8796f06377a Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy Date: Tue, 26 Sep 2023 17:46:25 -0400 Subject: [PATCH 1/2] fix extra heap allocations when detector is disabled --- deadlock.go | 29 +++++++++++++++++++++-------- deadlock_test.go | 21 +++++++++++++++++++++ 2 files changed, 42 insertions(+), 8 deletions(-) diff --git a/deadlock.go b/deadlock.go index f5fb944..919053f 100644 --- a/deadlock.go +++ b/deadlock.go @@ -77,7 +77,14 @@ func (m *Mutex) Lock() { currID++ } counterMu.Unlock() - lock(m.mu.Lock, m, false) + + // shortcut for disabled deadlock detection to prevent extra copying of `m.mu.Lock` to the heap + if Opts.Disable { + m.mu.Lock() + return + } + + lockEnabled(m.mu.Lock, m, false) } // Unlock unlocks the mutex. @@ -121,7 +128,12 @@ func (m *RWMutex) Lock() { } counterMu.Unlock() - lock(m.mu.Lock, m, false) + if Opts.Disable { + m.mu.Lock() + return + } + + lockEnabled(m.mu.Lock, m, false) } // Unlock unlocks the mutex for writing. It is a run-time error if rw is @@ -149,7 +161,12 @@ func (m *RWMutex) RLock() { } counterMu.Unlock() - lock(m.mu.RLock, m, true) + if Opts.Disable { + m.mu.RLock() + return + } + + lockEnabled(m.mu.RLock, m, true) } // RUnlock undoes a single RLock call; @@ -189,11 +206,7 @@ func checkLockOrdering(skip int, p identifiable, gid int64) { lo.checkLockOrdering(skip, p, gid) } -func lock(lockFn func(), ptr identifiable, preLockCheckRecursiveLocking bool) { - if Opts.Disable { - lockFn() - return - } +func lockEnabled(lockFn func(), ptr identifiable, preLockCheckRecursiveLocking bool) { // grab the current goroutine identifier gid := goid.Get() preLock(4, ptr, gid, preLockCheckRecursiveLocking) diff --git a/deadlock_test.go b/deadlock_test.go index b8bd77e..e41e576 100644 --- a/deadlock_test.go +++ b/deadlock_test.go @@ -275,3 +275,24 @@ func TestRWMutexDoubleRLockFail(t *testing.T) { t.Fatalf("expected 1 deadlocks, detected %d", deadlocks) } } + +//go:noinline +func benchRWAlloc(mu *RWMutex, 
res *int) { + mu.Lock() + defer mu.Unlock() // defer prevents inlining for the benchmark + *res++ +} + +// BenchmarkRWMutexAlloc demonstrates there is one alloc per lock invocation. +func BenchmarkRWMutexAlloc(b *testing.B) { + disable := Opts.Disable + Opts.Disable = true + defer func() { + Opts.Disable = disable + }() + var mu *RWMutex = &RWMutex{} + var res int + for i := 0; i < b.N; i++ { + benchRWAlloc(mu, &res) + } +} From b0a4a3d9511165510f1fba2dc28b044787ca42fb Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy Date: Wed, 27 Sep 2023 10:21:44 -0400 Subject: [PATCH 2/2] Move disabled shortcut path to the top --- deadlock.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/deadlock.go b/deadlock.go index 919053f..6cdbf05 100644 --- a/deadlock.go +++ b/deadlock.go @@ -71,6 +71,12 @@ func (m *Mutex) id() lockID { // Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf, // calling Opts.OnPotentialDeadlock on each occasion. func (m *Mutex) Lock() { + // shortcut for disabled deadlock detection to prevent extra copying of `m.mu.Lock` to the heap + if Opts.Disable { + m.mu.Lock() + return + } + counterMu.Lock() if m.muId == 0 { m.muId = currID @@ -78,12 +84,6 @@ func (m *Mutex) Lock() { } counterMu.Unlock() - // shortcut for disabled deadlock detection to prevent extra copying of `m.mu.Lock` to the heap - if Opts.Disable { - m.mu.Lock() - return - } - lockEnabled(m.mu.Lock, m, false) } @@ -121,6 +121,11 @@ func (m *RWMutex) id() lockID { // Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf, // calling Opts.OnPotentialDeadlock on each occasion. 
func (m *RWMutex) Lock() { + if Opts.Disable { + m.mu.Lock() + return + } + counterMu.Lock() if m.muId == 0 { m.muId = currID @@ -128,11 +133,6 @@ func (m *RWMutex) Lock() { } counterMu.Unlock() - if Opts.Disable { - m.mu.Lock() - return - } - lockEnabled(m.mu.Lock, m, false) } @@ -154,6 +154,11 @@ func (m *RWMutex) Unlock() { // Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf, // calling Opts.OnPotentialDeadlock on each occasion. func (m *RWMutex) RLock() { + if Opts.Disable { + m.mu.RLock() + return + } + counterMu.Lock() if m.muId == 0 { m.muId = currID @@ -161,11 +166,6 @@ func (m *RWMutex) RLock() { } counterMu.Unlock() - if Opts.Disable { - m.mu.RLock() - return - } - lockEnabled(m.mu.RLock, m, true) }