From 502b6057d2a30b284a3889e4439b47289244382b Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Wed, 20 Jul 2022 14:12:56 -0400
Subject: [PATCH] runtime: add mayAcquire annotation for trace.lock

Now that we've moved the trace locks to the leaf of the lock graph, we
can safely annotate that any trace event may acquire trace.lock even
if dynamically it turns out a particular event doesn't need to flush
and acquire this lock.

This reveals a new edge where we can trace while holding the mheap
lock, so we add this to the lock graph.

For #53789.
Updates #53979.

Change-Id: I13e2f6cd1b621cca4bed0cc13ef12e64d05c89a7
Reviewed-on: https://go-review.googlesource.com/c/go/+/418720
Reviewed-by: Michael Knyszek
Run-TryBot: Austin Clements
TryBot-Result: Gopher Robot
---
 src/runtime/lockrank.go   |  4 ++--
 src/runtime/mklockrank.go |  1 +
 src/runtime/trace.go      | 15 +++++++++++++++
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index ba4dd71c71f685..fbfef357708d8e 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -174,8 +174,8 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
 	lockRankWbufSpans:     {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
 	lockRankMheap:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
 	lockRankGlobalAlloc:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMheapSpecial, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
-	lockRankTrace:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin},
-	lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankTrace},
+	lockRankTrace:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+	lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
 	lockRankPanic:         {},
 	lockRankDeadlock:      {lockRankPanic, lockRankDeadlock},
 }
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index 4445e8327e3d29..9f269fd7b588fa 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -159,6 +159,7 @@ mheap, mheapSpecial < globalAlloc;
 
 # Execution tracer events (with a P)
 hchan,
+  mheap,
   root,
   sched,
   traceStrings,
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 56fd1ba37b6361..4290d922408240 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -883,6 +883,11 @@ func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
 
 // traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
 func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
+	// Any time we acquire a buffer, we may end up flushing it,
+	// but flushes are rare. Record the lock edge even if it
+	// doesn't happen this time.
+	lockRankMayTraceFlush()
+
 	mp = acquirem()
 	if p := mp.p.ptr(); p != nil {
 		return mp, p.id, &p.tracebuf
@@ -899,6 +904,16 @@ func traceReleaseBuffer(pid int32) {
 	releasem(getg().m)
 }
 
+// lockRankMayTraceFlush records the lock ranking effects of a
+// potential call to traceFlush.
+func lockRankMayTraceFlush() {
+	owner := trace.lockOwner
+	dolock := owner == nil || owner != getg().m.curg
+	if dolock {
+		lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
+	}
+}
+
 // traceFlush puts buf onto stack of full buffers and returns an empty buffer.
 //
 // This must run on the system stack because it acquires trace.lock.
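
The annotation the patch relies on, lockWithRankMayAcquire, tells the runtime's static lock-rank checker that the current path might take trace.lock, so the ordering is verified even on runs where no buffer flush actually happens. The stand-alone program below is a minimal sketch of that idea outside the runtime; every name in it (rank, checker, mayAcquire, and the placeholder rank constants) is hypothetical and is not part of the patch or of the runtime API.

package main

import "fmt"

// rank orders locks: in this sketch a lock may only be acquired
// while holding locks of strictly lower rank.
type rank int

const (
	rankMheap rank = 10 // placeholder values, not the runtime's ranks
	rankTrace rank = 20
)

// checker tracks the ranks of locks currently held. The real
// runtime keeps this list on the M; a single slice is enough for a
// single-goroutine sketch.
type checker struct {
	held []rank
}

// check reports whether acquiring r now would violate the order
// implied by the locks already held.
func (c *checker) check(r rank) error {
	for _, h := range c.held {
		if r <= h {
			return fmt.Errorf("acquiring rank %d while holding rank %d", r, h)
		}
	}
	return nil
}

// lock records an actual acquisition after checking the order.
func (c *checker) lock(r rank) {
	if err := c.check(r); err != nil {
		panic(err)
	}
	c.held = append(c.held, r)
}

// unlock releases a held lock.
func (c *checker) unlock(r rank) {
	for i := len(c.held) - 1; i >= 0; i-- {
		if c.held[i] == r {
			c.held = append(c.held[:i], c.held[i+1:]...)
			return
		}
	}
	panic("unlock of lock not held")
}

// mayAcquire runs the same ordering check as lock but acquires
// nothing, mirroring what lockWithRankMayAcquire does for the
// runtime: the edge is validated even if the rare path that would
// actually take the lock never runs.
func (c *checker) mayAcquire(r rank) {
	if err := c.check(r); err != nil {
		panic(err)
	}
}

func main() {
	var c checker

	// Emitting a trace event while holding the mheap lock might
	// flush a buffer and take the trace lock; declare that edge
	// even though no flush happens here.
	c.lock(rankMheap)
	c.mayAcquire(rankTrace) // ok: trace ranks above mheap
	c.unlock(rankMheap)

	// The reverse ordering is reported immediately, without
	// waiting for the rare flush to actually occur.
	func() {
		defer func() { fmt.Println("caught:", recover()) }()
		c.lock(rankTrace)
		c.mayAcquire(rankMheap) // violation: mheap ranks below trace
	}()
}

In the real runtime the list of held locks lives on the M and the checks are compiled in only for GOEXPERIMENT=staticlockranking builds; the sketch collapses that into a single-goroutine checker, but the payoff is the same: the new mheap-to-trace edge is exercised on every traced event, not just on the occasions when a flush occurs.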