Diffstat (limited to 'src/runtime/mklockrank.go')
-rw-r--r--  src/runtime/mklockrank.go  42
1 file changed, 34 insertions(+), 8 deletions(-)
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index bc15e57dd4..ef2f07d68a 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -52,29 +52,38 @@ NONE <
assistQueue,
sweep;
+# Test only
+NONE < testR, testW;
+
# Scheduler, timers, netpoll
-NONE < pollDesc, cpuprof;
+NONE <
+ allocmW,
+ execW,
+ cpuprof,
+ pollDesc;
assistQueue,
cpuprof,
forcegc,
pollDesc, # pollDesc can interact with timers, which can lock sched.
scavenge,
sweep,
- sweepWaiters
+ sweepWaiters,
+ testR
+# Above SCHED are things that can call into the scheduler.
+< SCHED
+# Below SCHED is the scheduler implementation.
+< allocmR,
+ execR
< sched;
sched < allg, allp;
allp < timers;
timers < netpollInit;
# Channels
-scavenge, sweep < hchan;
+scavenge, sweep, testR < hchan;
NONE < notifyList;
hchan, notifyList < sudog;
-# RWMutex
-NONE < rwmutexW;
-rwmutexW, sysmon < rwmutexR;
-
# Semaphores
NONE < root;
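
The reshaped scheduler block above is the core of the change: allocmW and execW (the write sides of the new allocm and exec rwmutexes) become leaf ranks, everything that may call into the scheduler is grouped before the new SCHED pseudo-rank, and the read sides allocmR and execR sit between SCHED and sched itself. Together with testR/testW, these per-lock ranks replace the single global rwmutexR/rwmutexW pair deleted in the old "# RWMutex" block. As a rough, self-contained sketch of what an "a < b" edge enforces, here is a toy rank checker in the spirit of the generated lockrank code; the plain numeric comparison and the rank values are illustrative only, since the real checker also consults a generated partial-order table:

package main

import "fmt"

// lockRank models a node in the DAG; an edge "a < b" means a lock of rank b
// may be acquired while holding rank a, but not the other way around.
type lockRank int

// Illustrative values only; the real ranks are generated by mklockrank.go.
const (
	rankAllocmW lockRank = iota + 1 // leaf: write side of allocmLock
	rankSCHED                       // pseudo-rank: "may call into the scheduler"
	rankAllocmR                     // read side, part of the scheduler implementation
	rankSched                       // sched.lock itself
)

// held records the ranks of locks currently held, in acquisition order.
var held []lockRank

// acquire panics if lk is not ordered after every lock already held.
func acquire(lk lockRank) {
	for _, h := range held {
		if lk <= h {
			panic(fmt.Sprintf("lock ordering violation: rank %d acquired while holding rank %d", lk, h))
		}
	}
	held = append(held, lk)
}

func release() { held = held[:len(held)-1] }

func main() {
	// Allowed: allocmR < sched, so sched may be taken with allocmR held.
	acquire(rankAllocmR)
	acquire(rankSched)
	release()
	release()

	// Violation: taking allocmR while already holding sched.
	acquire(rankSched)
	acquire(rankAllocmR) // panics
}
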
@@ -99,6 +108,9 @@ traceBuf < traceStrings;
# Malloc
allg,
+ allocmR,
+ execR, # May grow stack
+ execW, # May allocate after BeforeFork
hchan,
notifyList,
reflectOffs,
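
The two new comments in this hunk record why these ranks are pinned before the allocation ranks: code running with allocmR or execR held may grow the stack, and execW may be held across an allocation after BeforeFork, so the allocator's and stack allocator's locks end up nested inside them. A hedged illustration of such an induced edge, using sync.RWMutex as a stand-in for the runtime's own rwmutex (the function below is hypothetical, not a line from the runtime):

package main

import "sync"

// execLock stands in for the runtime's exec rwmutex.
var execLock sync.RWMutex

// withExecHeld runs fn with the read side held. If fn allocates or grows
// the stack, every lock the allocator may take is acquired while execR is
// held, which is exactly the ordering the DAG edge above has to permit.
func withExecHeld(fn func()) {
	execLock.RLock()
	defer execLock.RUnlock()
	fn()
}

func main() {
	withExecHeld(func() {
		_ = make([]byte, 1<<20) // allocation while the read lock is held
	})
}
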
@@ -133,7 +145,7 @@ gcBitsArenas,
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
-gscan, rwmutexR < stackpool;
+gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
@@ -179,6 +191,20 @@ NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
+
+# RWMutex internal read lock
+
+allocmR,
+ allocmW
+< allocmRInternal;
+
+execR,
+ execW
+< execRInternal;
+
+testR,
+ testW
+< testRInternal;
`
// cyclicRanks lists lock ranks that allow multiple locks of the same
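
The new trailing section gives each rwmutex a third rank for its internal read-side lock: the small mutex that guards the reader bookkeeping is taken on the read path (while the caller is charged with the R rank) and on the write path (while the caller is charged with the W rank), so it must be ordered after both, hence edges such as allocmR, allocmW < allocmRInternal. The toy reader/writer lock below shows where the three ranks attach; it is a hypothetical illustration rather than runtime/rwmutex.go, and the companion runtime change presumably passes the real ranks in when each rwmutex is initialized.

package main

import "sync"

// toyRWMutex is a working but simplistic reader/writer lock used only to
// show where the three DAG ranks attach (hypothetical; not runtime code):
//
//	readRank (e.g. allocmR)            - charged to callers from rlock to runlock
//	writeRank (e.g. allocmW)           - charged to callers from lock to unlock
//	readInternalRank (allocmRInternal) - the rank of mu, taken on BOTH paths,
//	                                     hence "allocmR, allocmW < allocmRInternal"
type toyRWMutex struct {
	mu      sync.Mutex // rank: readInternalRank
	c       sync.Cond
	readers int
	writer  bool
}

func newToyRWMutex() *toyRWMutex {
	rw := &toyRWMutex{}
	rw.c.L = &rw.mu
	return rw
}

func (rw *toyRWMutex) rlock() {
	// The caller holds readRank from here until runlock.
	rw.mu.Lock() // readInternalRank nested inside readRank
	for rw.writer {
		rw.c.Wait()
	}
	rw.readers++
	rw.mu.Unlock()
}

func (rw *toyRWMutex) runlock() {
	rw.mu.Lock()
	rw.readers--
	if rw.readers == 0 {
		rw.c.Broadcast()
	}
	rw.mu.Unlock()
}

func (rw *toyRWMutex) lock() {
	// The caller holds writeRank from here until unlock.
	rw.mu.Lock() // readInternalRank nested inside writeRank
	for rw.writer || rw.readers > 0 {
		rw.c.Wait()
	}
	rw.writer = true
	rw.mu.Unlock()
}

func (rw *toyRWMutex) unlock() {
	rw.mu.Lock()
	rw.writer = false
	rw.c.Broadcast()
	rw.mu.Unlock()
}

func main() {
	rw := newToyRWMutex()
	rw.rlock()
	rw.runlock()
	rw.lock()
	rw.unlock()
}
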