Source file src/runtime/runtime_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"io"
	. "runtime"
	"runtime/debug"
	"slices"
	"strings"
	"sync"
	"testing"
	"time"
	"unsafe"
)

// flagQuick is set by the -quick option to skip some relatively slow tests.
// This is used by the cmd/dist test runtime:cpu124.
// The cmd/dist test passes both -test.short and -quick;
// there are tests that only check testing.Short, and those tests will
// not be skipped if only -quick is used.
var flagQuick = flag.Bool("quick", false, "skip slow tests, for cmd/dist test runtime:cpu124")

func init() {
	// We're testing the runtime, so make tracebacks show things
	// in the runtime. This only raises the level, so it won't
	// override GOTRACEBACK=crash from the user.
	SetTracebackEnv("system")
}

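// errf, errfn, and errfn1 supply error interface values for the interface
// comparison benchmarks below. errf is not set in this file, so errfn
// returns a nil error.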
var errf error

func errfn() error {
	return errf
}

func errfn1() error {
	return io.EOF
}

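// BenchmarkIfaceCmp100 performs 100 non-empty-interface comparisons per
// iteration, comparing the error returned by errfn against io.EOF.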
func BenchmarkIfaceCmp100(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if errfn() == io.EOF {
				b.Fatal("bad comparison")
			}
		}
	}
}

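// BenchmarkIfaceCmpNil100 performs 100 comparisons per iteration of the
// io.EOF value returned by errfn1 against nil.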
func BenchmarkIfaceCmpNil100(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if errfn1() == nil {
				b.Fatal("bad comparison")
			}
		}
	}
}

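// efaceCmp1 and efaceCmp2 hold the operands for the empty-interface (eface)
// comparison benchmarks.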
var efaceCmp1 any
var efaceCmp2 any

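// BenchmarkEfaceCmpDiff compares two empty interfaces holding pointers to
// different ints, 100 times per iteration.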
func BenchmarkEfaceCmpDiff(b *testing.B) {
	x := 5
	efaceCmp1 = &x
	y := 6
	efaceCmp2 = &y
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if efaceCmp1 == efaceCmp2 {
				b.Fatal("bad comparison")
			}
		}
	}
}

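// BenchmarkEfaceCmpDiffIndirect compares two equal [2]int values, which do
// not fit directly in an interface word and so are stored indirectly.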
func BenchmarkEfaceCmpDiffIndirect(b *testing.B) {
	efaceCmp1 = [2]int{1, 2}
	efaceCmp2 = [2]int{1, 2}
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if efaceCmp1 != efaceCmp2 {
				b.Fatal("bad comparison")
			}
		}
	}
}

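// BenchmarkDefer measures a single deferred call (with arguments and a
// recover check) per iteration; see defer1.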
func BenchmarkDefer(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer1()
	}
}

func defer1() {
	defer func(x, y, z int) {
		if recover() != nil || x != 1 || y != 2 || z != 3 {
			panic("bad recover")
		}
	}(1, 2, 3)
}

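// BenchmarkDefer10 runs b.N/10 calls of defer2, each of which defers ten
// calls, so the total number of defers is roughly b.N.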
func BenchmarkDefer10(b *testing.B) {
	for i := 0; i < b.N/10; i++ {
		defer2()
	}
}

func defer2() {
	for i := 0; i < 10; i++ {
		defer func(x, y, z int) {
			if recover() != nil || x != 1 || y != 2 || z != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}

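// BenchmarkDeferMany accumulates b.N deferred calls in the benchmark
// function's own frame; they all run when the benchmark returns.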
func BenchmarkDeferMany(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer func(x, y, z int) {
			if recover() != nil || x != 1 || y != 2 || z != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}

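// BenchmarkPanicRecover panics and recovers once per iteration via defer3.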
func BenchmarkPanicRecover(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer3()
	}
}

func defer3() {
	defer func(x, y, z int) {
		if recover() == nil {
			panic("failed recover")
		}
	}(1, 2, 3)
	panic("hi")
}

// golang.org/issue/7063
func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
	SetCPUProfileRate(0)
}

// Addresses to test for faulting behavior.
// This is less a test of SetPanicOnFault and more a check that
// the operating system and the runtime can process these faults
// correctly. That is, we're indirectly testing that without SetPanicOnFault
// these would manage to turn into ordinary crashes.
// Note that these are truncated on 32-bit systems, so the bottom 32 bits
// of the larger addresses must themselves be invalid addresses.
// We might get unlucky and the OS might have mapped one of these
// addresses, but probably not: they're all in the first page, very high
// addresses that normally an OS would reserve for itself, or malformed
// addresses. Even so, we might have to remove one or two on different
// systems. We will see.

var faultAddrs = []uint64{
	// low addresses
	0,
	1,
	0xfff,
	// high (kernel) addresses
	// or else malformed.
	0xffffffffffffffff,
	0xfffffffffffff001,
	0xffffffffffff0001,
	0xfffffffffff00001,
	0xffffffffff000001,
	0xfffffffff0000001,
	0xffffffff00000001,
	0xfffffff000000001,
	0xffffff0000000001,
	0xfffff00000000001,
	0xffff000000000001,
	0xfff0000000000001,
	0xff00000000000001,
	0xf000000000000001,
	0x8000000000000001,
}

func TestSetPanicOnFault(t *testing.T) {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)

	nfault := 0
	for _, addr := range faultAddrs {
		testSetPanicOnFault(t, uintptr(addr), &nfault)
	}
	if nfault == 0 {
		t.Fatalf("none of the addresses faulted")
	}
}

// testSetPanicOnFault tests one potentially faulting address.
// It deliberately constructs and uses an invalid pointer,
// so mark it as nocheckptr.
//
//go:nocheckptr
func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) {
	if GOOS == "js" || GOOS == "wasip1" {
		t.Skip(GOOS + " does not support catching faults")
	}

	defer func() {
		if err := recover(); err != nil {
			*nfault++
		}
	}()

	// The read should fault, except that sometimes we hit
	// addresses that have had C or kernel pages mapped there
	// readable by user code. So just log the content.
	// If no addresses fault, we'll fail the test.
	v := *(*byte)(unsafe.Pointer(addr))
	t.Logf("addr %#x: %#x\n", addr, v)
}

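// eqstring_generic is a straightforward reference implementation of string
// equality; TestEqString checks that it agrees with the built-in == operator.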
func eqstring_generic(s1, s2 string) bool {
	if len(s1) != len(s2) {
		return false
	}
	// optimization in assembly versions:
	// if s1.str == s2.str { return true }
	for i := 0; i < len(s1); i++ {
		if s1[i] != s2[i] {
			return false
		}
	}
	return true
}

func TestEqString(t *testing.T) {
	// This isn't really an exhaustive test of == on strings, it's
	// just a convenient way of documenting (via eqstring_generic)
	// what == does.
	s := []string{
		"",
		"a",
		"c",
		"aaa",
		"ccc",
		"cccc"[:3], // same contents, different string
		"1234567890",
	}
	for _, s1 := range s {
		for _, s2 := range s {
			x := s1 == s2
			y := eqstring_generic(s1, s2)
			if x != y {
				t.Errorf(`("%s" == "%s") = %t, want %t`, s1, s2, x, y)
			}
		}
	}
}

func TestTrailingZero(t *testing.T) {
	// make sure we add padding for structs with trailing zero-sized fields
	type T1 struct {
		n int32
		z [0]byte
	}
	if unsafe.Sizeof(T1{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T1{}, unsafe.Sizeof(T1{}))
	}
	type T2 struct {
		n int64
		z struct{}
	}
	if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(uintptr(0)) {
		t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(uintptr(0)))
	}
	type T3 struct {
		n byte
		z [4]struct{}
	}
	if unsafe.Sizeof(T3{}) != 2 {
		t.Errorf("sizeof(%#v)==%d, want 2", T3{}, unsafe.Sizeof(T3{}))
	}
	// make sure padding can double for both zerosize and alignment
	type T4 struct {
		a int32
		b int16
		c int8
		z struct{}
	}
	if unsafe.Sizeof(T4{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T4{}, unsafe.Sizeof(T4{}))
	}
	// make sure we don't pad a zero-sized thing
	type T5 struct {
	}
	if unsafe.Sizeof(T5{}) != 0 {
		t.Errorf("sizeof(%#v)==%d, want 0", T5{}, unsafe.Sizeof(T5{}))
	}
}

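// TestAppendGrowth checks that appending single elements to a small []int64
// gives a capacity equal to the smallest power of two that holds the length,
// i.e. that the capacity doubles each time the length passes a power of two.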
func TestAppendGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	want := 1
	for i := 1; i <= 100; i++ {
		x = append(x, 1)
		check(want)
		if i&(i-1) == 0 {
			want = 2 * i
		}
	}
}

var One = []int64{1}

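// TestAppendSliceGrowth makes the same check as TestAppendGrowth, but grows
// the slice via append(x, One...) to exercise the append-slice path.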
func TestAppendSliceGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	want := 1
	for i := 1; i <= 100; i++ {
		x = append(x, One...)
		check(want)
		if i&(i-1) == 0 {
			want = 2 * i
		}
	}
}

func TestGoroutineProfileTrivial(t *testing.T) {
	// Calling GoroutineProfile twice in a row should find the same number of goroutines,
	// but it's possible there are goroutines just about to exit, so we might end up
	// with fewer in the second call. Try a few times; it should converge once those
	// zombies are gone.
	for i := 0; ; i++ {
		n1, ok := GoroutineProfile(nil) // should fail, there's at least 1 goroutine
		if n1 < 1 || ok {
			t.Fatalf("GoroutineProfile(nil) = %d, %v, want >0, false", n1, ok)
		}
		n2, ok := GoroutineProfile(make([]StackRecord, n1))
		if n2 == n1 && ok {
			break
		}
		t.Logf("GoroutineProfile(%d) = %d, %v, want %d, true", n1, n2, ok, n1)
		if i >= 10 {
			t.Fatalf("GoroutineProfile not converging")
		}
	}
}

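// BenchmarkGoroutineProfile reports p50/p90/p99 latencies for GoroutineProfile
// when idle and under GC load, with small, large, and sparse (recently shrunk)
// sets of goroutines, and for both counting (nil) and recording calls.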
func BenchmarkGoroutineProfile(b *testing.B) {
	run := func(fn func() bool) func(b *testing.B) {
		runOne := func(b *testing.B) {
			latencies := make([]time.Duration, 0, b.N)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				start := time.Now()
				ok := fn()
				if !ok {
					b.Fatal("goroutine profile failed")
				}
				latencies = append(latencies, time.Since(start))
			}
			b.StopTimer()

			// Sort latencies then report percentiles.
			slices.Sort(latencies)
			b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
			b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
			b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
		}
		return func(b *testing.B) {
			b.Run("idle", runOne)

			b.Run("loaded", func(b *testing.B) {
				stop := applyGCLoad(b)
				runOne(b)
				// Make sure to stop the timer before we wait! The load created above
				// is very heavy-weight and not easy to stop, so we could end up
				// confusing the benchmarking framework for small b.N.
				b.StopTimer()
				stop()
			})
		}
	}

	// Measure the cost of counting goroutines
	b.Run("small-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	// Measure the cost with a small set of goroutines
	n := NumGoroutine()
	p := make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("small", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))

	// Measure the cost with a large set of goroutines
	ch := make(chan int)
	var ready, done sync.WaitGroup
	for i := 0; i < 5000; i++ {
		ready.Add(1)
		done.Add(1)
		go func() { ready.Done(); <-ch; done.Done() }()
	}
	ready.Wait()

	// Count goroutines with a large allgs list
	b.Run("large-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	n = NumGoroutine()
	p = make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("large", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))

	close(ch)
	done.Wait()

	// Count goroutines with a large (but unused) allgs list
	b.Run("sparse-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	// Measure the cost of a large (but unused) allgs list
	n = NumGoroutine()
	p = make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("sparse", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))
}

func TestVersion(t *testing.T) {
	// Test that version does not contain \r or \n.
	vers := Version()
	if strings.Contains(vers, "\r") || strings.Contains(vers, "\n") {
		t.Fatalf("cr/nl in version: %q", vers)
	}
}

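// TestTimediv exercises the runtime's 64-bit-by-32-bit division helper,
// including the saturated result 1<<31 - 1 when the quotient overflows int32.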
func TestTimediv(t *testing.T) {
	for _, tc := range []struct {
		num int64
		div int32
		ret int32
		rem int32
	}{
		{
			num: 8,
			div: 2,
			ret: 4,
			rem: 0,
		},
		{
			num: 9,
			div: 2,
			ret: 4,
			rem: 1,
		},
		{
			// Used by runtime.check.
			num: 12345*1000000000 + 54321,
			div: 1000000000,
			ret: 12345,
			rem: 54321,
		},
		{
			num: 1<<32 - 1,
			div: 2,
			ret: 1<<31 - 1, // no overflow.
			rem: 1,
		},
		{
			num: 1 << 32,
			div: 2,
			ret: 1<<31 - 1, // overflow.
			rem: 0,
		},
		{
			num: 1 << 40,
			div: 2,
			ret: 1<<31 - 1, // overflow.
			rem: 0,
		},
		{
			num: 1<<40 + 1,
			div: 1 << 10,
			ret: 1 << 30,
			rem: 1,
		},
	} {
		name := fmt.Sprintf("%d div %d", tc.num, tc.div)
		t.Run(name, func(t *testing.T) {
			// Double check that the inputs make sense using
			// standard 64-bit division.
			ret64 := tc.num / int64(tc.div)
			rem64 := tc.num % int64(tc.div)
			if ret64 != int64(int32(ret64)) {
				// Simulate timediv overflow value.
				ret64 = 1<<31 - 1
				rem64 = 0
			}
			if ret64 != int64(tc.ret) {
				t.Errorf("%d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret64, rem64, tc.ret, tc.rem)
			}

			var rem int32
			ret := Timediv(tc.num, tc.div, &rem)
			if ret != tc.ret || rem != tc.rem {
				t.Errorf("timediv %d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret, rem, tc.ret, tc.rem)
			}
		})
	}
}