Source file
src/runtime/mklockrank.go
1
2
3
4
5
6
7
8
9 package main
10
import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"slices"
	"strings"
)
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40 const ranks = `
41 # Sysmon
42 NONE
43 < sysmon
44 < scavenge, forcegc;
45
46 # Defer
47 NONE < defer;
48
49 # GC
50 NONE <
51 sweepWaiters,
52 assistQueue,
53 strongFromWeakQueue,
54 sweep;
55
56 # Test only
57 NONE < testR, testW;
58
59 NONE < timerSend;
60
61 # Scheduler, timers, netpoll
62 NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
63 scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
64 assistQueue,
65 cpuprof,
66 forcegc,
67 hchan,
68 pollDesc, # pollDesc can interact with timers, which can lock sched.
69 scavenge,
70 strongFromWeakQueue,
71 sweep,
72 sweepWaiters,
73 testR,
74 wakeableSleep
75 # Above SCHED are things that can call into the scheduler.
76 < SCHED
77 # Below SCHED is the scheduler implementation.
78 < allocmR,
79 execR;
80 allocmR, execR, hchan < sched;
81 sched < allg, allp;
82
83 # Channels
84 NONE < notifyList;
85 hchan, notifyList < sudog;
86
87 hchan, pollDesc, wakeableSleep < timers;
88 timers, timerSend < timer < netpollInit;
89
90 # Semaphores
91 NONE < root;
92
93 # Itabs
94 NONE
95 < itab
96 < reflectOffs;
97
98 # User arena state
99 NONE < userArenaState;
100
101 # Tracing without a P uses a global trace buffer.
102 scavenge
103 # Above TRACEGLOBAL can emit a trace event without a P.
104 < TRACEGLOBAL
105 # Below TRACEGLOBAL manages the global tracing buffer.
106 # Note that traceBuf eventually chains to MALLOC, but we never get that far
107 # in the situation where there's no P.
108 < traceBuf;
109 # Starting/stopping tracing traces strings.
110 traceBuf < traceStrings;
111
112 # Malloc
113 allg,
114 allocmR,
115 allp, # procresize
116 execR, # May grow stack
117 execW, # May allocate after BeforeFork
118 hchan,
119 notifyList,
120 reflectOffs,
121 timer,
122 traceStrings,
123 userArenaState
124 # Above MALLOC are things that can allocate memory.
125 < MALLOC
126 # Below MALLOC is the malloc implementation.
127 < fin,
128 spanSetSpine,
129 mspanSpecial,
130 traceTypeTab,
131 MPROF;
132
133 # We can acquire gcBitsArenas for pinner bits, and
134 # it's guarded by mspanSpecial.
135 MALLOC, mspanSpecial < gcBitsArenas;
136
137 # Memory profiling
138 MPROF < profInsert, profBlock, profMemActive;
139 profMemActive < profMemFuture;
140
141 # Stack allocation and copying
142 gcBitsArenas,
143 netpollInit,
144 profBlock,
145 profInsert,
146 profMemFuture,
147 spanSetSpine,
148 fin,
149 root
150 # Anything that can grow the stack can acquire STACKGROW.
151 # (Most higher layers imply STACKGROW, like MALLOC.)
152 < STACKGROW
153 # Below STACKGROW is the stack allocator/copying implementation.
154 < gscan;
155 gscan < stackpool;
156 gscan < stackLarge;
157 # Generally, hchan must be acquired before gscan. But in one case,
158 # where we suspend a G and then shrink its stack, syncadjustsudogs
159 # can acquire hchan locks while holding gscan. To allow this case,
160 # we use hchanLeaf instead of hchan.
161 gscan < hchanLeaf;
162
163 # Write barrier
164 defer,
165 gscan,
166 mspanSpecial,
167 pollCache,
168 sudog,
169 timer
170 # Anything that can have write barriers can acquire WB.
171 # Above WB, we can have write barriers.
172 < WB
173 # Below WB is the write barrier implementation.
174 < wbufSpans;
175
176 # Span allocator
177 stackLarge,
178 stackpool,
179 wbufSpans
180 # Above mheap is anything that can call the span allocator.
181 < mheap;
182 # Below mheap is the span allocator implementation.
183 #
184 # Specials: we're allowed to allocate a special while holding
185 # an mspanSpecial lock, and they're part of the malloc implementation.
186 # Pinner bits might be freed by the span allocator.
187 mheap, mspanSpecial < mheapSpecial;
188 mheap, mheapSpecial < globalAlloc;
189
190 # Execution tracer events (with a P)
191 hchan,
192 mheap,
193 root,
194 sched,
195 traceStrings,
196 notifyList,
197 fin
198 # Above TRACE is anything that can create a trace event
199 < TRACE
200 < trace
201 < traceStackTab;
202
203 # panic is handled specially. It is implicitly below all other locks.
204 NONE < panic;
205 # deadlock is not acquired while holding panic, but it also needs to be
206 # below all other locks.
207 panic < deadlock;
208 # raceFini is only held while exiting.
209 panic < raceFini;
210
211 # RWMutex internal read lock
212
213 allocmR,
214 allocmW
215 < allocmRInternal;
216
217 execR,
218 execW
219 < execRInternal;
220
221 testR,
222 testW
223 < testRInternal;
224 `
225
226
227
228
229 var cyclicRanks = map[string]bool{
230
231 "timers": true,
232
233
234 "hchan": true,
235
236
237 "hchanLeaf": true,
238
239 "deadlock": true,
240 }
241
242 func main() {
243 flagO := flag.String("o", "", "write to `file` instead of stdout")
244 flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
245 flag.Parse()
246 if flag.NArg() != 0 {
247 fmt.Fprintf(os.Stderr, "too many arguments")
248 os.Exit(2)
249 }
250
251 g, err := dag.Parse(ranks)
252 if err != nil {
253 log.Fatal(err)
254 }
255
256 var out []byte
257 if *flagDot {
258 var b bytes.Buffer
259 g.TransitiveReduction()
260
261 for k := range cyclicRanks {
262 g.AddEdge(k, k)
263 }
264
265
266
267
268
269 g.Transpose()
270 generateDot(&b, g)
271 out = b.Bytes()
272 } else {
273 var b bytes.Buffer
274 generateGo(&b, g)
275 out, err = format.Source(b.Bytes())
276 if err != nil {
277 log.Fatal(err)
278 }
279 }
280
281 if *flagO != "" {
282 err = os.WriteFile(*flagO, out, 0666)
283 } else {
284 _, err = os.Stdout.Write(out)
285 }
286 if err != nil {
287 log.Fatal(err)
288 }
289 }
290
291 func generateGo(w io.Writer, g *dag.Graph) {
292 fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.
293
294 package runtime
295
296 type lockRank int
297
298 `)
299
300
301 topo := g.Topo()
302 for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
303 topo[i], topo[j] = topo[j], topo[i]
304 }
305 fmt.Fprintf(w, `
306 // Constants representing the ranks of all non-leaf runtime locks, in rank order.
307 // Locks with lower rank must be taken before locks with higher rank,
308 // in addition to satisfying the partial order in lockPartialOrder.
309 // A few ranks allow self-cycles, which are specified in lockPartialOrder.
310 const (
311 lockRankUnknown lockRank = iota
312
313 `)
314 for _, rank := range topo {
315 if isPseudo(rank) {
316 fmt.Fprintf(w, "\t// %s\n", rank)
317 } else {
318 fmt.Fprintf(w, "\t%s\n", cname(rank))
319 }
320 }
321 fmt.Fprintf(w, `)
322
323 // lockRankLeafRank is the rank of lock that does not have a declared rank,
324 // and hence is a leaf lock.
325 const lockRankLeafRank lockRank = 1000
326 `)
327
328
329 fmt.Fprintf(w, `
330 // lockNames gives the names associated with each of the above ranks.
331 var lockNames = []string{
332 `)
333 for _, rank := range topo {
334 if !isPseudo(rank) {
335 fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
336 }
337 }
338 fmt.Fprintf(w, `}
339
340 func (rank lockRank) String() string {
341 if rank == 0 {
342 return "UNKNOWN"
343 }
344 if rank == lockRankLeafRank {
345 return "LEAF"
346 }
347 if rank < 0 || int(rank) >= len(lockNames) {
348 return "BAD RANK"
349 }
350 return lockNames[rank]
351 }
352 `)
353
354
355 fmt.Fprintf(w, `
356 // lockPartialOrder is the transitive closure of the lock rank graph.
357 // An entry for rank X lists all of the ranks that can already be held
358 // when rank X is acquired.
359 //
360 // Lock ranks that allow self-cycles list themselves.
361 var lockPartialOrder [][]lockRank = [][]lockRank{
362 `)
363 for _, rank := range topo {
364 if isPseudo(rank) {
365 continue
366 }
367 list := []string{}
368 for _, before := range g.Edges(rank) {
369 if !isPseudo(before) {
370 list = append(list, cname(before))
371 }
372 }
373 if cyclicRanks[rank] {
374 list = append(list, cname(rank))
375 }
376
377 fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
378 }
379 fmt.Fprintf(w, "}\n")
380 }
381
382
383 func cname(label string) string {
384 return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
385 }
386
387 func isPseudo(label string) bool {
388 return strings.ToUpper(label) == label
389 }
390
391
392 func generateDot(w io.Writer, g *dag.Graph) {
393 fmt.Fprintf(w, "digraph g {\n")
394
395
396 for _, node := range g.Nodes {
397 fmt.Fprintf(w, "%q;\n", node)
398 }
399
400
401 for _, node := range g.Nodes {
402 for _, to := range g.Edges(node) {
403 fmt.Fprintf(w, "%q -> %q;\n", node, to)
404 }
405 }
406
407 fmt.Fprintf(w, "}\n")
408 }
409
View as plain text