Source file src/runtime/debug_test.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO: This test could be implemented on all (most?) UNIXes if we
// added syscall.Tgkill more widely.

// We skip all of these tests under race mode because our test thread
// spends all of its time in the race runtime, which isn't a safe
// point.

//go:build (amd64 || arm64 || loong64 || ppc64le) && linux && !race

package runtime_test

import (
	"fmt"
	"internal/abi"
	"internal/asan"
	"internal/msan"
	"math"
	"os"
	"regexp"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"syscall"
	"testing"
)

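// startDebugCallWorker starts a goroutine that the debug call tests can
// inject calls into. It returns that goroutine's g and a cleanup function
// that stops the worker and restores GOMAXPROCS and the GC percent.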
func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) {
	// This can deadlock if run under a debugger because it
	// depends on catching SIGTRAP, which is usually swallowed by
	// a debugger.
	skipUnderDebugger(t)

	// asan/msan instrumentation interferes with tests since we might
	// inject debugCallV2 while in the asan/msan runtime. This is a
	// problem for doing things like running the GC or taking stack
	// traces. Not sure why this is happening yet, but skip for now.
	if msan.Enabled || asan.Enabled {
		t.Skip("debugCallV2 is injected erroneously during asan/msan runtime calls; skipping")
	}

	// This can deadlock if there aren't enough threads or if a GC
	// tries to interrupt an atomic loop (see issue #10958). Execute
	// an extra GC to ensure even the sweep phase is done (out of
	// caution to prevent #49370 from happening).
	// TODO(mknyszek): This extra GC cycle is likely unnecessary
	// because preemption (which may happen during the sweep phase)
	// isn't much of an issue anymore thanks to asynchronous preemption.
	// The biggest risk is having a write barrier in the debug call
	// injection test code fire, because it runs in a signal handler
	// and may not have a P.
	//
	// We use 8 Ps so there's room for the debug call worker,
	// something that's trying to preempt the call worker, and the
	// goroutine that's trying to stop the call worker.
	ogomaxprocs := runtime.GOMAXPROCS(8)
	ogcpercent := debug.SetGCPercent(-1)
	runtime.GC()

	// ready is a buffered channel so debugCallWorker won't block
	// on sending to it. This makes it less likely we'll catch
	// debugCallWorker while it's in the runtime.
	ready := make(chan *runtime.G, 1)
	var stop uint32
	done := make(chan error)
	go debugCallWorker(ready, &stop, done)
	g = <-ready
	return g, func() {
		atomic.StoreUint32(&stop, 1)
		err := <-done
		if err != nil {
			t.Fatal(err)
		}
		runtime.GOMAXPROCS(ogomaxprocs)
		debug.SetGCPercent(ogcpercent)
	}
}

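// debugCallWorker is the target goroutine for the debug call tests. It locks
// itself to an OS thread, reports its g on ready, and spins in
// debugCallWorker2 until *stop is set, reporting any failure on done.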
func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ready <- runtime.Getg()

	x := 2
	debugCallWorker2(stop, &x)
	if x != 1 {
		done <- fmt.Errorf("want x = 1, got %d; register pointer not adjusted?", x)
	}
	close(done)
}

// Don't inline this function, since we want to test adjusting
// pointers in the arguments.
//
//go:noinline
func debugCallWorker2(stop *uint32, x *int) {
	for atomic.LoadUint32(stop) == 0 {
		// Strongly encourage x to live in a register so we
		// can test pointer register adjustment.
		*x++
	}
	*x = 1
}

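// debugCallTKill sends SIGTRAP to the thread with the given tid in this
// process. The tests pass it to InjectDebugCall as the mechanism for
// interrupting the target goroutine's thread.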
func debugCallTKill(tid int) error {
	return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP)
}

// skipUnderDebugger skips the current test when running under a
// debugger (specifically if this process has a tracer). This is
// Linux-specific.
func skipUnderDebugger(t *testing.T) {
	pid := syscall.Getpid()
	status, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
	if err != nil {
		t.Logf("couldn't get proc tracer: %s", err)
		return
	}
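	// /proc/<pid>/status reports TracerPid; a nonzero value means a tracer
	// (such as a debugger) is attached via ptrace.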
	re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`)
	sub := re.FindSubmatch(status)
	if sub == nil {
		t.Logf("couldn't find proc tracer PID")
		return
	}
	if string(sub[1]) == "0" {
		return
	}
	t.Skip("test will deadlock under a debugger")
}

func TestDebugCall(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	type stackArgs struct {
		x0    int
		x1    float64
		y0Ret int
		y1Ret float64
	}

	// Inject a call into the debugCallWorker goroutine and test
	// basic argument and result passing.
	fn := func(x int, y float64) (y0Ret int, y1Ret float64) {
		return x + 1, y + 1.0
	}
	var args *stackArgs
	var regs abi.RegArgs
	intRegs := regs.Ints[:]
	floatRegs := regs.Floats[:]
	fval := float64(42.0)
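	// If this platform's ABI passes arguments in registers, seed the inputs
	// directly in the register state; otherwise fall back to the stack-based
	// args struct.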
	if len(intRegs) > 0 {
		intRegs[0] = 42
		floatRegs[0] = math.Float64bits(fval)
	} else {
		args = &stackArgs{
			x0: 42,
			x1: 42.0,
		}
	}

	if _, err := runtime.InjectDebugCall(g, fn, &regs, args, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
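	// Read the results back from wherever the ABI placed them: the same
	// registers used for the inputs, or the stack args struct.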
	var result0 int
	var result1 float64
	if len(intRegs) > 0 {
		result0 = int(intRegs[0])
		result1 = math.Float64frombits(floatRegs[0])
	} else {
		result0 = args.y0Ret
		result1 = args.y1Ret
	}
	if result0 != 43 {
		t.Errorf("want 43, got %d", result0)
	}
	if result1 != fval+1 {
		t.Errorf("want 43, got %f", result1)
	}
}

func TestDebugCallLarge(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	// Inject a call with a large call frame.
	const N = 128
	var args struct {
		in  [N]int
		out [N]int
	}
	fn := func(in [N]int) (out [N]int) {
		for i := range in {
			out[i] = in[i] + 1
		}
		return
	}
	var want [N]int
	for i := range args.in {
		args.in[i] = i
		want[i] = i + 1
	}
	if _, err := runtime.InjectDebugCall(g, fn, nil, &args, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
	if want != args.out {
		t.Fatalf("want %v, got %v", want, args.out)
	}
}

func TestDebugCallGC(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	// Inject a call that performs a GC.
	if _, err := runtime.InjectDebugCall(g, runtime.GC, nil, nil, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
}

func TestDebugCallGrowStack(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	// Inject a call that grows the stack. debugCallWorker checks
	// for stack pointer breakage.
	if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, nil, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
}

//go:nosplit
func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) {
	// The nosplit causes this function to not contain safe-points
	// except at calls.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	*gpp = runtime.Getg()

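	// Spin here so an injected call would land at a PC that is not a safe
	// point; the store lets the test observe that the worker has reached
	// the loop.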
	for atomic.LoadUint32(stop) == 0 {
		atomic.StoreUint32(ready, 1)
	}
}

func TestDebugCallUnsafePoint(t *testing.T) {
	skipUnderDebugger(t)

	// This can deadlock if there aren't enough threads or if a GC
	// tries to interrupt an atomic loop (see issue #10958).
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))

	// InjectDebugCall cannot be executed while a GC is actively in
	// progress. Wait until the current GC is done, and turn it off.
	//
	// See #49370.
	runtime.GC()
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	// Test that the runtime refuses call injection at unsafe points.
	var g *runtime.G
	var ready, stop uint32
	defer atomic.StoreUint32(&stop, 1)
	go debugCallUnsafePointWorker(&g, &ready, &stop)
	for atomic.LoadUint32(&ready) == 0 {
		runtime.Gosched()
	}

	_, err := runtime.InjectDebugCall(g, func() {}, nil, nil, debugCallTKill, true)
	if msg := "call not at safe point"; err == nil || err.Error() != msg {
		t.Fatalf("want %q, got %s", msg, err)
	}
}

func TestDebugCallPanic(t *testing.T) {
	skipUnderDebugger(t)

	// This can deadlock if there aren't enough threads.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))

	// InjectDebugCall cannot be executed while a GC is actively in
	// progress. Wait until the current GC is done, and turn it off.
	//
	// See #10958 and #49370.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// TODO(mknyszek): This extra GC cycle is likely unnecessary
	// because preemption (which may happen during the sweep phase)
	// isn't much of an issue anymore thanks to asynchronous preemption.
	// The biggest risk is having a write barrier in the debug call
	// injection test code fire, because it runs in a signal handler
	// and may not have a P.
	runtime.GC()

	ready := make(chan *runtime.G)
	var stop uint32
	defer atomic.StoreUint32(&stop, 1)
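	// Start a target goroutine pinned to its own OS thread; it spins until
	// stop is set so the injected call has a running goroutine to land on.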
	go func() {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		ready <- runtime.Getg()
		for atomic.LoadUint32(&stop) == 0 {
		}
	}()
	g := <-ready

	p, err := runtime.InjectDebugCall(g, func() { panic("test") }, nil, nil, debugCallTKill, false)
	if err != nil {
		t.Fatal(err)
	}
	if ps, ok := p.(string); !ok || ps != "test" {
		t.Fatalf("wanted panic %v, got %v", "test", p)
	}
}
