// Source file: src/runtime/debugcall.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Though the debug call function feature is not enabled on
     6  // ppc64, inserted ppc64 to avoid missing Go declaration error
     7  // for debugCallPanicked while building runtime.test
     8  //go:build amd64 || arm64 || loong64 || ppc64le || ppc64
     9  
    10  package runtime
    11  
    12  import (
    13  	"internal/abi"
    14  	"internal/runtime/sys"
    15  	"unsafe"
    16  )
    17  
// Reasons returned by debugCallCheck explaining why a debugger call
// injection is not safe at the current PC. An empty string from
// debugCallCheck means injection is allowed.
const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)
    24  
// debugCallV2 is the entry point debuggers use to inject a function
// call. It has no Go body; the implementation is provided outside Go
// (in per-architecture assembly for the platforms in the build tag).
func debugCallV2()

// debugCallPanicked reports a panic value from an injected call back
// to the debugger (see debugCallWrap2). It has no Go body; the
// implementation is provided outside Go. Note the build-tag comment
// above: ppc64 is included only so this declaration resolves there.
func debugCallPanicked(val any)
    27  
// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why; an empty return means injection may proceed.
//
// NOTE(review): nosplit presumably because this runs on the
// injection path entered from debugCallV2, before stack growth is
// safe — confirm against the assembly entry point.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	if sp := sys.GetCallerSP(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
		// pc is a return PC, so back it up into the call
		// instruction for the PCDATA lookup — unless pc is the
		// function entry, where there is nothing before it.
		if pc != f.entry() {
			pc--
		}
		up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
		if up != abi.UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}
    98  
// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	// Saved external LockOSThread count, restored after the debug
	// call returns and this goroutine resumes below.
	var lockedExt uint32
	callerpc := sys.GetCallerPC()
	gp := getg()

	// Lock ourselves to the OS thread.
	//
	// Debuggers rely on us running on the same thread until we get to
	// dispatch the function they asked as to.
	//
	// We're going to transfer this to the new G we just created.
	lockOSThread()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// TODO(mknyszek): It would be nice to wrap these arguments in an allocated
		// closure and start the goroutine with that closure, but the compiler disallows
		// implicit closure allocation in the runtime.
		fn := debugCallWrap1
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc, false, waitReasonZero)
		// Pass the arguments through newg.param, since newproc1
		// cannot start the goroutine with a closure (see TODO above).
		args := &debugCallWrapArgs{
			dispatch: dispatch,
			callingG: gp,
		}
		newg.param = unsafe.Pointer(args)

		// Transfer locked-ness to the new goroutine.
		// Save lock state to restore later.
		mp := gp.m
		if mp != gp.lockedm.ptr() {
			throw("inconsistent lockedm")
		}
		// Save the external lock count and clear it so
		// that it can't be unlocked from the debug call.
		// Note: we already locked internally to the thread,
		// so if we were locked before we're still locked now.
		lockedExt = mp.lockedExt
		mp.lockedExt = 0

		mp.lockedg.set(newg)
		newg.lockedm.set(mp)
		gp.lockedm = 0

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		trace := traceAcquire()
		if trace.ok() {
			// Trace the event before the transition. It may take a
			// stack trace, but we won't own the stack after the
			// transition anymore.
			trace.GoPark(traceBlockDebugCall, 1)
		}
		casGToWaiting(gp, _Grunning, waitReasonDebugCall)
		if trace.ok() {
			traceRelease(trace)
		}
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns
	// (debugCallWrap1 unparks us via execute).

	// Restore locked state.
	mp := gp.m
	mp.lockedExt = lockedExt
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)

	// Undo the lockOSThread we did earlier.
	unlockOSThread()

	gp.asyncSafePoint = false
}
   203  
// debugCallWrapArgs carries arguments from debugCallWrap to
// debugCallWrap1 via the new goroutine's param field, because
// newproc1 cannot start a goroutine with a closure.
type debugCallWrapArgs struct {
	dispatch uintptr // PC of the call-dispatching function to invoke
	callingG *g      // goroutine that initiated the debug call; resumed when done
}
   208  
// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine. It runs the dispatch call (with panics trapped by
// debugCallWrap2) and then hands the thread directly back to the
// goroutine that initiated the call.
func debugCallWrap1() {
	gp := getg()
	// Recover the arguments stashed in gp.param by debugCallWrap
	// (newproc1 couldn't pass them as a closure), then clear param.
	args := (*debugCallWrapArgs)(gp.param)
	dispatch, callingG := args.dispatch, args.callingG
	gp.param = nil

	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
	// Stash callingG in schedlink because mcall's closure
	// can't capture anything.
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		trace := traceAcquire()
		if trace.ok() {
			// Trace the event before the transition. It may take a
			// stack trace, but we won't own the stack after the
			// transition anymore.
			trace.GoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		if trace.ok() {
			traceRelease(trace)
		}
		dropg()
		// Park this (callee) goroutine on the global run queue so
		// the scheduler eventually lets it finish exiting.
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		// Wake the calling goroutine (parked in debugCallWrap's
		// mcall) and run it directly on this thread.
		trace = traceAcquire()
		casgstatus(callingG, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(callingG, 0)
			traceRelease(trace)
		}
		execute(callingG, true)
	})
}
   261  
// debugCallWrap2 invokes the call-dispatching function at PC dispatch
// and traps any panic it raises, reporting the panic value to the
// debugger via debugCallPanicked instead of letting it unwind.
func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
	//
	// Manufacture a func() value whose code pointer is dispatch:
	// point a funcval at it and reinterpret its address as the
	// func's internal representation. noescape keeps the stack
	// funcval from being forced to the heap.
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	// ok distinguishes a normal return from a panic: it is set only
	// after dispatchF returns, so the deferred recover fires exactly
	// when dispatchF panicked.
	var ok bool
	defer func() {
		if !ok {
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}
   278  