aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mklockrank.go
blob: 110d57faf07c69a842e620916edfd6c591c104af (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// Lines starting with # inside the graph text are comments (passed
// through to dag.Parse, which ignores them).
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
NONE < sysmon, sweepWaiters, assistQueue, cpuprof, sweep, pollDesc, deadlock, itab, notifyList, root, rwmutexW, defer;
sysmon < scavenge, forcegc;
assistQueue, cpuprof, forcegc, pollDesc, scavenge, sweep, sweepWaiters < sched;
sched < allg, allp;
allp < timers;
itab < reflectOffs;
scavenge, sweep < hchan;
scavenge < traceBuf;
allg, hchan, reflectOffs, timers, traceBuf < fin;
traceBuf < traceStrings;
allg, hchan, notifyList, reflectOffs, timers, traceStrings < mspanSpecial, profInsert, profBlock, profMemActive, gcBitsArenas, spanSetSpine, mheapSpecial;
profMemActive < profMemFuture;
hchan, root, sched, traceStrings, notifyList, fin < trace;
trace < traceStackTab;
timers < netpollInit;
rwmutexW, sysmon < rwmutexR;
gcBitsArenas, netpollInit, profBlock, profInsert, profMemFuture, spanSetSpine, traceStackTab < gscan;
gscan, rwmutexR < stackpool;
gscan < stackLarge;

# Generally, hchan must be acquired before gscan. But in one specific
# case (in syncadjustsudogs from markroot after the g has been suspended
# by suspendG), we allow gscan to be acquired, and then an hchan lock. To
# allow this case, we use this hchanLeaf rank in syncadjustsudogs(),
# rather than hchan. By using this special rank, we don't allow any further
# locks to be acquired other than more hchan locks.
gscan < hchanLeaf;

hchan, notifyList < sudog;
defer, gscan, mspanSpecial, sudog < wbufSpans;
stackLarge, stackpool, wbufSpans < mheap;
mheap, mheapSpecial < globalAlloc;

# panic is handled specially. It is implicitly below all other locks.
deadlock < panic;
`

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
//
// Ranks listed here get a self-edge: generateGo appends the rank to its
// own lockPartialOrder entry, and main adds a self-edge to the -dot
// visualization.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
}

// main parses the rank graph in ranks and writes either the generated
// Go rank-checking structures (the default) or a Graphviz rendering of
// the graph (-dot), to stdout or to the file named by -o.
func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		// Fprintln: the message takes no format arguments and must
		// end with a newline (the original Fprintf omitted it).
		fmt.Fprintln(os.Stderr, "too many arguments")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add self-edges for the cyclic ranks so they are visible
		// in the visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down
		// and time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		// gofmt the generated source; a failure here means the
		// generator emitted syntactically invalid Go.
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}

// generateGo writes the Go source of runtime/lockrank.go to w: the
// lockRank constants, the lockNames string table with its String
// method, and the lockPartialOrder table. The output is unformatted;
// the caller runs it through go/format.
//
// NOTE: the raw string literals below are emitted verbatim into the
// generated file; edit them with care.
func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks.
	// Reverse the topological order in place so that the constants are
	// emitted lowest rank first, matching the "lower rank must be taken
	// before higher rank" convention documented in the output.
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		fmt.Fprintf(w, "\t%s\n", cname(rank))
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create string table.
	// Indexed by the constants above, so the iteration order must match
	// the constant-generation loop.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create partial order structure.
	// For each rank, g.Edges lists the ranks that may already be held
	// when it is acquired; cyclic ranks additionally list themselves.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		list := []string{}
		for _, before := range g.Edges(rank) {
			list = append(list, cname(before))
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}

// cname returns the Go const name for the given lock rank label,
// e.g. "hchan" -> "lockRankHchan".
//
// An empty label yields the bare "lockRank" prefix instead of
// panicking on the label[:1] slice.
func cname(label string) string {
	if label == "" {
		return "lockRank"
	}
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

// generateDot writes a Graphviz dot rendering of g to w.
//
// Every node is declared first (so nodes without edges still appear),
// then each edge is emitted as "from" -> "to", in the order returned
// by g.Nodes and g.Edges.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintln(w, "digraph g {")

	// Declare all nodes up front.
	for _, n := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", n)
	}

	// Emit one line per edge.
	for _, src := range g.Nodes {
		for _, dst := range g.Edges(src) {
			fmt.Fprintf(w, "%q -> %q;\n", src, dst)
		}
	}

	fmt.Fprintln(w, "}")
}