Source file src/runtime/mklockrank.go

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// Ranks in all caps are pseudo-nodes that help define order, but do
// not actually define a rank.
//
// A small worked example of this syntax follows the constant below.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc, computeMaxProcs, updateMaxProcsG;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  strongFromWeakQueue,
  cleanupQueue,
  sweep;

# Test only
NONE < testR, testW;

# vgetrandom
NONE < vgetrandom;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cleanupQueue,
  computeMaxProcs,
  cpuprof,
  forcegc,
  updateMaxProcsG,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  strongFromWeakQueue,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# Typelinks
NONE
< typelinks;

# Synctest
hchan,
  notifyList,
  reflectOffs,
  root,
  strongFromWeakQueue,
  sweepWaiters,
  timer,
  timers
< synctest;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  typelinks,
  userArenaState,
  vgetrandom
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  traceTypeTab,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  synctest,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# xRegState allocator
sched < xRegAlloc;

# spanSPMCs allocator and list
WB, sched < spanSPMCs;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
# Fixallocs
mheap, mheapSpecial, xRegAlloc, spanSPMCs < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`
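
// To make the rule syntax above concrete, here is a minimal sketch
// (hypothetical ranks a, b, and c; real internal/dag calls, used the same
// way as in main below):
//
//	g, err := dag.Parse(`
//	NONE < a;
//	a < b, c;
//	`)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// g.Edges("b") contains "a": a may already be held when b is
//	// acquired, but not vice versa. c is likewise ordered after a,
//	// while b and c are unordered with respect to each other.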

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}
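
// In the generated lockPartialOrder (see generateGo below), a cyclic rank's
// own constant is appended to its list of allowed predecessors, so, for
// example, lockRankHchan's entry includes lockRankHchan itself. That is
// what permits acquiring a second hchan lock while one is already held.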

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintln(os.Stderr, "too many arguments")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down,
		// and time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}
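
// Typical invocations (a sketch; the exact go:generate wiring in the
// runtime is an assumption based on the -o flag above):
//
//	go run mklockrank.go -o lockrank.go    # regenerate lockrank.go
//	go run mklockrank.go -dot              # emit Graphviz instead of Go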

func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks.
	topo := g.Topo()
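	// Reverse the topological order in place so that ranks that must be
	// acquired first (those nearest NONE in the graph) get the smallest
	// numeric values.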
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared
// rank, and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create string table.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create partial order structure.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}
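
// For orientation, the generated lockrank.go has this shape (a sketch of a
// few entries, not verbatim output):
//
//	const (
//		lockRankUnknown lockRank = iota
//
//		lockRankSysmon
//		lockRankScavenge
//		...
//	)
//
//	var lockPartialOrder [][]lockRank = [][]lockRank{
//		lockRankSysmon:   {},
//		lockRankScavenge: {lockRankSysmon},
//		...
//	}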

// cname returns the Go const name for the given lock rank label.
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

// isPseudo reports whether label names a pseudo-node (an all-caps label
// that helps define order but is not a real rank).
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}
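
// For example, cname("hchan") returns "lockRankHchan", and
// isPseudo("MALLOC") is true while isPseudo("hchan") is false.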

// generateDot emits a Graphviz dot representation of g to w.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Define all nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Create edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}
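
// A sketch of rendering the graph (assumes Graphviz's dot is installed):
//
//	go run mklockrank.go -dot > lockrank.dot
//	dot -Tsvg lockrank.dot > lockrank.svg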