Source file src/runtime/mklockrank.go

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
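//
// The command below is an illustrative sketch of how this generator is
// typically invoked, inferred from the flags it defines rather than
// from any documented workflow:
//
//	go run mklockrank.go -o lockrank.go
//
// Passing -dot instead emits a Graphviz rendering of the rank graph.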
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// Ranks in all caps are pseudo-nodes that help define order, but do
// not actually define a rank.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
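//
// As a purely illustrative fragment (hypothetical names, not part of
// the real graph below):
//
//	NONE < a;
//	a < b, c;
//
// a is acquired with no locks held, and if a is held together with b
// (or with c), then a must have been acquired first; equivalently,
// a cannot be acquired while b or c is held.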
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc, computeMaxProcs, updateMaxProcsG;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  strongFromWeakQueue,
  cleanupQueue,
  sweep;

# Test only
NONE < testR, testW;

# vgetrandom
NONE < vgetrandom;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cleanupQueue,
  computeMaxProcs,
  cpuprof,
  forcegc,
  updateMaxProcsG,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  strongFromWeakQueue,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# Synctest
hchan,
  notifyList,
  reflectOffs,
  root,
  strongFromWeakQueue,
  sweepWaiters,
  timer,
  timers
< synctest;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  userArenaState,
  vgetrandom
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  traceTypeTab,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  synctest,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# xRegState allocator
sched < xRegAlloc;

# spanSPMCs allocator and list
WB, sched < spanSPMCs;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
# Fixallocs
mheap, mheapSpecial, xRegAlloc, spanSPMCs < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintf(os.Stderr, "too many arguments")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down
		// and time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}

func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks.
	topo := g.Topo()
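	// The topological order puts the innermost (highest) ranks first;
	// reverse it so the constants emitted below run from the lowest
	// rank to the highest, matching their documented "rank order".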
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create string table.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create partial order structure.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
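	// Each non-pseudo rank gets one row listing the non-pseudo ranks
	// that may already be held when it is acquired; cyclic ranks also
	// list themselves. For a hypothetical fragment "NONE < a; a < b",
	// the row for b would come out roughly as:
	//
	//	lockRankB: {lockRankA},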
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}

// cname returns the Go const name for the given lock rank label.
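// For example, cname("sched") returns "lockRankSched".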
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

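// isPseudo reports whether label names a pseudo-node in the rank graph
// (written in ALL CAPS, e.g. MALLOC) rather than a real lock rank.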
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}

// generateDot emits a Graphviz dot representation of g to w.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Define all nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Create edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}
