Source file src/runtime/lockrank_on.go

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.staticlockranking
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/runtime/atomic"
    11  	"unsafe"
    12  )
    13  
    14  const staticLockRanking = true
    15  
    16  // worldIsStopped is accessed atomically to track world-stops. 1 == world
    17  // stopped.
    18  var worldIsStopped atomic.Uint32
    19  
    20  // lockRankStruct is embedded in mutex
    21  type lockRankStruct struct {
    22  	// static lock ranking of the lock
    23  	rank lockRank
    24  	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
    25  	// 32-bit systems.
    26  	pad int
    27  }
    28  
    29  // lockInit(l *mutex, rank lockRank) sets the rank of the lock before it is used.
    30  // If there is no clear place to initialize a lock, then the rank of a lock can be
    31  // specified during the lock call itself via lockWithRank(l *mutex, rank lockRank).
    32  func lockInit(l *mutex, rank lockRank) {
    33  	l.rank = rank
    34  }
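
        // Example of intended use: a lock with a clear initialization point has its
        // rank assigned once, before first use. A minimal sketch, where
        // hypotheticalLock and lockRankHypothetical stand in for a real runtime
        // mutex and a real rank constant from lockrank.go:
        //
        //	var hypotheticalLock mutex
        //
        //	func hypotheticalSubsystemInit() {
        //		lockInit(&hypotheticalLock, lockRankHypothetical)
        //	}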
    35  
    36  func getLockRank(l *mutex) lockRank {
    37  	return l.rank
    38  }
    39  
    40  // lockWithRank is like lock(l), but allows the caller to specify a lock rank
    41  // when acquiring a non-static lock.
    42  //
    43  // Note that we need to be careful about stack splits:
    44  //
    45  // This function is not nosplit, thus it may split at function entry. This may
    46  // introduce a new edge in the lock order, but it is no different from any
    47  // other (non-nosplit) call before this call (including the call to lock() itself).
    48  //
    49  // However, we switch to the systemstack to record the lock held to ensure that
    50  // we record an accurate lock ordering. e.g., without systemstack, a stack
    51  // split on entry to lock2() would record stack split locks as taken after l,
    52  // even though l is not actually locked yet.
    53  func lockWithRank(l *mutex, rank lockRank) {
    54  	if l == &debuglock || l == &paniclk || l == &raceFiniLock {
    55  		// debuglock is only used for println/printlock(). Don't do lock
    56  		// rank recording for it, since print/println are used when
    57  		// printing out a lock ordering problem below.
    58  		//
    59  		// paniclk is only used for fatal throw/panic. Don't do lock
    60  		// rank recording for it, since we throw after reporting a
    61  		// lock ordering problem. Additionally, paniclk may be taken
    62  		// after effectively any lock (anywhere we might panic), which
    63  		// the partial order doesn't cover.
    64  		//
    65  		// raceFiniLock is held while exiting when running
    66  		// the race detector. Don't do lock rank recording for it,
    67  		// since we are exiting.
    68  		lock2(l)
    69  		return
    70  	}
    71  	if rank == 0 {
    72  		rank = lockRankLeafRank
    73  	}
    74  	gp := getg()
    75  	// Log the new class.
    76  	systemstack(func() {
    77  		i := gp.m.locksHeldLen
    78  		if i >= len(gp.m.locksHeld) {
    79  			throw("too many locks held concurrently for rank checking")
    80  		}
    81  		gp.m.locksHeld[i].rank = rank
    82  		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
    83  		gp.m.locksHeldLen++
    84  
    85  		// i is the index of the lock being acquired
    86  		if i > 0 {
    87  			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
    88  		}
    89  		lock2(l)
    90  	})
    91  }
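
        // For context, the plain lock entry point can forward here using the rank
        // recorded by lockInit, so callers do not pass ranks explicitly. A sketch of
        // that wrapper (the real wrapper is defined alongside the lock
        // implementation, not in this file):
        //
        //	func lock(l *mutex) {
        //		lockWithRank(l, getLockRank(l))
        //	}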
    92  
    93  // nosplit to ensure it can be called in as many contexts as possible.
    94  //
    95  //go:nosplit
    96  func printHeldLocks(gp *g) {
    97  	if gp.m.locksHeldLen == 0 {
    98  		println("<none>")
    99  		return
   100  	}
   101  
   102  	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
   103  		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(gp.m.locksHeld[j].lockAddr))
   104  	}
   105  }
   106  
   107  // acquireLockRankAndM acquires a rank which is not associated with a mutex
   108  // lock. To maintain the invariant that an M with m.locks==0 does not hold any
   109  // lock-like resources, it also acquires the M.
   110  //
   111  // This function may be called in nosplit context and thus must be nosplit.
   112  //
   113  //go:nosplit
   114  func acquireLockRankAndM(rank lockRank) {
   115  	acquirem()
   116  
   117  	gp := getg()
   118  	// Log the new class. See comment on lockWithRank.
   119  	systemstack(func() {
   120  		i := gp.m.locksHeldLen
   121  		if i >= len(gp.m.locksHeld) {
   122  			throw("too many locks held concurrently for rank checking")
   123  		}
   124  		gp.m.locksHeld[i].rank = rank
   125  		gp.m.locksHeld[i].lockAddr = 0
   126  		gp.m.locksHeldLen++
   127  
   128  		// i is the index of the lock being acquired
   129  		if i > 0 {
   130  			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
   131  		}
   132  	})
   133  }
   134  
   135  // checkRanks checks if goroutine g, which has most recently acquired a lock
   136  // with rank 'prevRank', can now acquire a lock with rank 'rank'.
   137  //
   138  //go:systemstack
   139  func checkRanks(gp *g, prevRank, rank lockRank) {
   140  	rankOK := false
   141  	if rank < prevRank {
   142  		// If rank < prevRank, then we definitely have a rank error
   143  		rankOK = false
   144  	} else if rank == lockRankLeafRank {
   145  		// If new lock is a leaf lock, then the preceding lock can
   146  		// be anything except another leaf lock.
   147  		rankOK = prevRank < lockRankLeafRank
   148  	} else {
   149  		// We've now verified the total lock ranking, but we
   150  		// also enforce the partial ordering specified by
   151  		// lockPartialOrder. Two locks with the same rank
   152  		// can only be acquired at the same time if explicitly
   153  		// listed in the lockPartialOrder table.
   154  		list := lockPartialOrder[rank]
   155  		for _, entry := range list {
   156  			if entry == prevRank {
   157  				rankOK = true
   158  				break
   159  			}
   160  		}
   161  	}
   162  	if !rankOK {
   163  		printlock()
   164  		println(gp.m.procid, " ======")
   165  		printHeldLocks(gp)
   166  		throw("lock ordering problem")
   167  	}
   168  }
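
        // To make the rule concrete: with two hypothetical ranks where
        // lockRankA < lockRankB, acquiring B while holding A is allowed only if
        // lockPartialOrder[lockRankB] lists lockRankA. Illustrative shape of the
        // generated table in lockrank.go:
        //
        //	lockPartialOrder = [][]lockRank{
        //		lockRankB: {lockRankA}, // the edge A -> B is permitted
        //	}
        //
        // Acquiring A while holding B fails the rank < prevRank test above, and
        // holding an unrelated rank that is not listed for B fails the
        // partial-order scan.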
   169  
   170  // See comment on lockWithRank regarding stack splitting.
   171  func unlockWithRank(l *mutex) {
   172  	if l == &debuglock || l == &paniclk || l == &raceFiniLock {
   173  		// See comment at beginning of lockWithRank.
   174  		unlock2(l)
   175  		return
   176  	}
   177  	gp := getg()
   178  	systemstack(func() {
   179  		found := false
   180  		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   181  			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
   182  				found = true
   183  				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
   184  				gp.m.locksHeldLen--
   185  				break
   186  			}
   187  		}
   188  		if !found {
   189  			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
   190  			throw("unlock without matching lock acquire")
   191  		}
   192  		unlock2(l)
   193  	})
   194  }
   195  
   196  // releaseLockRankAndM releases a rank which is not associated with a mutex
   197  // lock. To maintain the invariant that an M with m.locks==0 does not hold any
   198  // lock-like resources, it also releases the M.
   199  //
   200  // This function may be called in nosplit context and thus must be nosplit.
   201  //
   202  //go:nosplit
   203  func releaseLockRankAndM(rank lockRank) {
   204  	gp := getg()
   205  	systemstack(func() {
   206  		found := false
   207  		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   208  			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
   209  				found = true
   210  				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
   211  				gp.m.locksHeldLen--
   212  				break
   213  			}
   214  		}
   215  		if !found {
   216  			println(gp.m.procid, ":", rank.String(), rank)
   217  			throw("lockRank release without matching lockRank acquire")
   218  		}
   219  	})
   220  
   221  	releasem(getg().m)
   222  }
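
        // acquireLockRankAndM and releaseLockRankAndM bracket a lock-like resource
        // that has a rank but no mutex. A minimal sketch with a hypothetical rank
        // (real callers pass ranks defined in lockrank.go):
        //
        //	acquireLockRankAndM(lockRankHypothetical)
        //	// ... critical section ordered by lockRankHypothetical ...
        //	releaseLockRankAndM(lockRankHypothetical)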
   223  
   224  // nosplit because it may be called from nosplit contexts.
   225  //
   226  //go:nosplit
   227  func lockWithRankMayAcquire(l *mutex, rank lockRank) {
   228  	gp := getg()
   229  	if gp.m.locksHeldLen == 0 {
   230  		// No possibility of lock ordering problem if no other locks held
   231  		return
   232  	}
   233  
   234  	systemstack(func() {
   235  		i := gp.m.locksHeldLen
   236  		if i >= len(gp.m.locksHeld) {
   237  			throw("too many locks held concurrently for rank checking")
   238  		}
   239  		// Temporarily add this lock to the locksHeld list, so
   240  		// checkRanks() will print out the list, including this lock, if there
   241  		// is a lock ordering problem.
   242  		gp.m.locksHeld[i].rank = rank
   243  		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
   244  		gp.m.locksHeldLen++
   245  		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
   246  		gp.m.locksHeldLen--
   247  	})
   248  }
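
        // lockWithRankMayAcquire declares, up front, that a lock could be taken
        // later on this path, so the ordering edge is checked even when the path
        // ends up not locking it. A sketch (names are illustrative):
        //
        //	lockWithRankMayAcquire(&hypotheticalLock, lockRankHypothetical)
        //	if needResource {
        //		lock(&hypotheticalLock) // edge already validated above
        //		// ... use the resource ...
        //		unlock(&hypotheticalLock)
        //	}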
   249  
   250  // nosplit to ensure it can be called in as many contexts as possible.
   251  //
   252  //go:nosplit
   253  func checkLockHeld(gp *g, l *mutex) bool {
   254  	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   255  		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
   256  			return true
   257  		}
   258  	}
   259  	return false
   260  }
   261  
   262  // assertLockHeld throws if l is not held by the caller.
   263  //
   264  // nosplit to ensure it can be called in as many contexts as possible.
   265  //
   266  //go:nosplit
   267  func assertLockHeld(l *mutex) {
   268  	gp := getg()
   269  
   270  	held := checkLockHeld(gp, l)
   271  	if held {
   272  		return
   273  	}
   274  
   275  	// Crash from system stack to avoid splits that may cause
   276  	// additional issues.
   277  	systemstack(func() {
   278  		printlock()
   279  		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
   280  		printHeldLocks(gp)
   281  		throw("not holding required lock!")
   282  	})
   283  }
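
        // A typical use is to state a locking precondition at the top of a helper
        // that mutates state guarded by a mutex. Sketch (names are illustrative):
        //
        //	// hypotheticalAdjust modifies fields protected by hypotheticalLock.
        //	//
        //	// hypotheticalLock must be held.
        //	func hypotheticalAdjust() {
        //		assertLockHeld(&hypotheticalLock)
        //		// ... mutate guarded fields ...
        //	}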
   284  
   285  // assertRankHeld throws if a mutex with rank r is not held by the caller.
   286  //
   287  // This is less precise than assertLockHeld, but can be used in places where a
   288  // pointer to the exact mutex is not available.
   289  //
   290  // nosplit to ensure it can be called in as many contexts as possible.
   291  //
   292  //go:nosplit
   293  func assertRankHeld(r lockRank) {
   294  	gp := getg()
   295  
   296  	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
   297  		if gp.m.locksHeld[i].rank == r {
   298  			return
   299  		}
   300  	}
   301  
   302  	// Crash from system stack to avoid splits that may cause
   303  	// additional issues.
   304  	systemstack(func() {
   305  		printlock()
   306  		print("caller requires lock with rank ", r.String(), ", holding:\n")
   307  		printHeldLocks(gp)
   308  		throw("not holding required lock!")
   309  	})
   310  }
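
        // assertRankHeld suits call sites that know which class of lock must be held
        // but not which instance, e.g. one of many per-object locks sharing a rank.
        // Sketch (rank name is illustrative):
        //
        //	func hypotheticalScan() {
        //		assertRankHeld(lockRankHypothetical) // some lock of this rank is held
        //		// ... read state guarded by any lock of that rank ...
        //	}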
   311  
   312  // worldStopped notes that the world is stopped.
   313  //
   314  // Caller must hold worldsema.
   315  //
   316  // nosplit to ensure it can be called in as many contexts as possible.
   317  //
   318  //go:nosplit
   319  func worldStopped() {
   320  	if stopped := worldIsStopped.Add(1); stopped != 1 {
   321  		systemstack(func() {
   322  			print("world stop count=", stopped, "\n")
   323  			throw("recursive world stop")
   324  		})
   325  	}
   326  }
   327  
   328  // worldStarted notes that the world is starting.
   329  //
   330  // Caller must hold worldsema.
   331  //
   332  // nosplit to ensure it can be called in as many contexts as possible.
   333  //
   334  //go:nosplit
   335  func worldStarted() {
   336  	if stopped := worldIsStopped.Add(-1); stopped != 0 {
   337  		systemstack(func() {
   338  			print("world stop count=", stopped, "\n")
   339  			throw("released non-stopped world stop")
   340  		})
   341  	}
   342  }
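
        // worldStopped and worldStarted bracket the stop-the-world window, and the
        // caller of both must hold worldsema. A simplified sketch of the pairing in
        // the stop/start-the-world sequence:
        //
        //	semacquire(&worldsema)
        //	// ... stop all Ps ...
        //	worldStopped()
        //
        //	// ... work that requires a stopped world ...
        //
        //	worldStarted()
        //	// ... restart Ps ...
        //	semrelease(&worldsema)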
   343  
   344  // nosplit to ensure it can be called in as many contexts as possible.
   345  //
   346  //go:nosplit
   347  func checkWorldStopped() bool {
   348  	stopped := worldIsStopped.Load()
   349  	if stopped > 1 {
   350  		systemstack(func() {
   351  			print("inconsistent world stop count=", stopped, "\n")
   352  			throw("inconsistent world stop count")
   353  		})
   354  	}
   355  
   356  	return stopped == 1
   357  }
   358  
   359  // assertWorldStopped throws if the world is not stopped. It does not check
   360  // which M stopped the world.
   361  //
   362  // nosplit to ensure it can be called in as many contexts as possible.
   363  //
   364  //go:nosplit
   365  func assertWorldStopped() {
   366  	if checkWorldStopped() {
   367  		return
   368  	}
   369  
   370  	throw("world not stopped")
   371  }
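
        // Sketch of typical use: a helper that reads or rewrites state which is only
        // consistent while the world is stopped (names are illustrative):
        //
        //	func hypotheticalFlushAll() {
        //		assertWorldStopped()
        //		// ... walk per-P state without further locking ...
        //	}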
   372  
   373  // assertWorldStoppedOrLockHeld throws if the world is not stopped and the
   374  // passed lock is not held.
   375  //
   376  // nosplit to ensure it can be called in as many contexts as possible.
   377  //
   378  //go:nosplit
   379  func assertWorldStoppedOrLockHeld(l *mutex) {
   380  	if checkWorldStopped() {
   381  		return
   382  	}
   383  
   384  	gp := getg()
   385  	held := checkLockHeld(gp, l)
   386  	if held {
   387  		return
   388  	}
   389  
   390  	// Crash from system stack to avoid splits that may cause
   391  	// additional issues.
   392  	systemstack(func() {
   393  		printlock()
   394  		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
   395  		println("<no world stop>")
   396  		printHeldLocks(gp)
   397  		throw("no world stop or required lock!")
   398  	})
   399  }
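
        // This suits fields that are written either with a specific lock held or
        // during a stop-the-world. Sketch (names are illustrative):
        //
        //	// hypotheticalCount is protected by hypotheticalLock, or by the world
        //	// being stopped.
        //	func readHypotheticalCount() uint64 {
        //		assertWorldStoppedOrLockHeld(&hypotheticalLock)
        //		return hypotheticalCount
        //	}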
   400  
