// Source: src/runtime/runtime_test.go (extracted from a plain-text web view;
// original line numbers were fused into the text and have been removed here).
5 package runtime_test
6
7 import (
8 "flag"
9 "fmt"
10 "internal/cpu"
11 "internal/runtime/atomic"
12 "io"
13 "math/bits"
14 . "runtime"
15 "runtime/debug"
16 "slices"
17 "strings"
18 "sync"
19 "testing"
20 "time"
21 "unsafe"
22 )
23
24
25
26
27
28
// flagQuick skips the slow tests in this package; cmd/dist passes it for
// the "runtime:cpu124" test variant (see the flag's usage string).
var flagQuick = flag.Bool("quick", false, "skip slow tests, for cmd/dist test runtime:cpu124")
30
func init() {
	// Pin the traceback level to "system" for every test in this package,
	// so tracebacks include runtime frames regardless of the GOTRACEBACK
	// setting in the environment.
	SetTracebackEnv("system")
}
37
// errf backs errfn; as a package-level variable it keeps the returned
// error from being a compile-time constant in the benchmarks below.
var errf error

// errfn returns errf (normally nil) through the error interface.
func errfn() error {
	return errf
}

// errfn1 returns a concrete non-nil error, for benchmarking comparisons
// of a non-nil interface against nil.
func errfn1() error {
	return io.EOF
}
47
48 func BenchmarkIfaceCmp100(b *testing.B) {
49 for i := 0; i < b.N; i++ {
50 for j := 0; j < 100; j++ {
51 if errfn() == io.EOF {
52 b.Fatal("bad comparison")
53 }
54 }
55 }
56 }
57
58 func BenchmarkIfaceCmpNil100(b *testing.B) {
59 for i := 0; i < b.N; i++ {
60 for j := 0; j < 100; j++ {
61 if errfn1() == nil {
62 b.Fatal("bad comparison")
63 }
64 }
65 }
66 }
67
// efaceCmp1 and efaceCmp2 are the operands for the BenchmarkEfaceCmp*
// benchmarks below; they are package-level so the comparisons are not
// resolved at compile time.
var efaceCmp1 any
var efaceCmp2 any
70
71 func BenchmarkEfaceCmpDiff(b *testing.B) {
72 x := 5
73 efaceCmp1 = &x
74 y := 6
75 efaceCmp2 = &y
76 for i := 0; i < b.N; i++ {
77 for j := 0; j < 100; j++ {
78 if efaceCmp1 == efaceCmp2 {
79 b.Fatal("bad comparison")
80 }
81 }
82 }
83 }
84
85 func BenchmarkEfaceCmpDiffIndirect(b *testing.B) {
86 efaceCmp1 = [2]int{1, 2}
87 efaceCmp2 = [2]int{1, 2}
88 for i := 0; i < b.N; i++ {
89 for j := 0; j < 100; j++ {
90 if efaceCmp1 != efaceCmp2 {
91 b.Fatal("bad comparison")
92 }
93 }
94 }
95 }
96
97 func BenchmarkDefer(b *testing.B) {
98 for i := 0; i < b.N; i++ {
99 defer1()
100 }
101 }
102
// defer1 defers a single three-argument closure that verifies, when it
// runs, that no panic is in flight and the arguments arrived unchanged.
func defer1() {
	defer func(p, q, r int) {
		if recover() != nil || p != 1 || q != 2 || r != 3 {
			panic("bad recover")
		}
	}(1, 2, 3)
}
110
111 func BenchmarkDefer10(b *testing.B) {
112 for i := 0; i < b.N/10; i++ {
113 defer2()
114 }
115 }
116
// defer2 defers ten three-argument closures from a loop; each verifies on
// execution that no panic is active and its arguments are intact.
func defer2() {
	for left := 10; left > 0; left-- {
		defer func(p, q, r int) {
			if recover() != nil || p != 1 || q != 2 || r != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}
126
// BenchmarkDeferMany accumulates b.N pending defers in one frame before
// any of them runs, stressing the deferred-call chain.
func BenchmarkDeferMany(b *testing.B) {
	for rounds := b.N; rounds > 0; rounds-- {
		defer func(p, q, r int) {
			if recover() != nil || p != 1 || q != 2 || r != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}
136
137 func BenchmarkPanicRecover(b *testing.B) {
138 for i := 0; i < b.N; i++ {
139 defer3()
140 }
141 }
142
// defer3 panics deliberately and relies on its deferred closure to absorb
// the panic; it re-panics only if recover unexpectedly returns nil.
func defer3() {
	defer func(p, q, r int) {
		if recover() == nil {
			panic("failed recover")
		}
	}(1, 2, 3)
	panic("hi")
}
151
152
// TestStopCPUProfilingWithProfilerOff checks that disabling CPU profiling
// when it is already off is harmless.
func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
	SetCPUProfileRate(0)
}
156
157
158
159
160
161
162
163
164
165
166
167
168
169
// faultAddrs lists addresses that TestSetPanicOnFault tries to read:
// the low page, then a ladder of high addresses with progressively fewer
// leading one-bits. Which of these actually fault is platform-dependent
// (TODO confirm per platform); the test only requires that at least one
// of them does.
var faultAddrs = []uint64{
	// Low addresses; typically unmapped.
	0,
	1,
	0xfff,
	// High addresses, shrinking the leading block of set bits one nibble
	// at a time; on 64-bit hardware most of these are invalid (e.g.
	// noncanonical on x86-64 — NOTE(review): assumption, verify).
	0xffffffffffffffff,
	0xfffffffffffff001,
	0xffffffffffff0001,
	0xfffffffffff00001,
	0xffffffffff000001,
	0xfffffffff0000001,
	0xffffffff00000001,
	0xfffffff000000001,
	0xffffff0000000001,
	0xfffff00000000001,
	0xffff000000000001,
	0xfff0000000000001,
	0xff00000000000001,
	0xf000000000000001,
	0x8000000000000001,
}
193
194 func TestSetPanicOnFault(t *testing.T) {
195 old := debug.SetPanicOnFault(true)
196 defer debug.SetPanicOnFault(old)
197
198 nfault := 0
199 for _, addr := range faultAddrs {
200 testSetPanicOnFault(t, uintptr(addr), &nfault)
201 }
202 if nfault == 0 {
203 t.Fatalf("none of the addresses faulted")
204 }
205 }
206
207
208
209
210
211
212 func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) {
213 if GOOS == "js" || GOOS == "wasip1" {
214 t.Skip(GOOS + " does not support catching faults")
215 }
216
217 defer func() {
218 if err := recover(); err != nil {
219 *nfault++
220 }
221 }()
222
223
224
225
226
227 v := *(*byte)(unsafe.Pointer(addr))
228 t.Logf("addr %#x: %#x\n", addr, v)
229 }
230
// eqstring_generic is a reference implementation of string equality:
// lengths must match, then every byte is compared in order. TestEqString
// checks the compiler's == operator against it.
func eqstring_generic(s1, s2 string) bool {
	if len(s1) != len(s2) {
		return false
	}
	// Range over the byte slice (not the string) so every byte is
	// visited, including UTF-8 continuation bytes.
	for i, c := range []byte(s1) {
		if c != s2[i] {
			return false
		}
	}
	return true
}
244
245 func TestEqString(t *testing.T) {
246
247
248
249 s := []string{
250 "",
251 "a",
252 "c",
253 "aaa",
254 "ccc",
255 "cccc"[:3],
256 "1234567890",
257 }
258 for _, s1 := range s {
259 for _, s2 := range s {
260 x := s1 == s2
261 y := eqstring_generic(s1, s2)
262 if x != y {
263 t.Errorf(`("%s" == "%s") = %t, want %t`, s1, s2, x, y)
264 }
265 }
266 }
267 }
268
// TestTrailingZero checks unsafe.Sizeof for structs whose final field is
// zero-sized: such structs get padding so that a pointer to the trailing
// field does not point one past the end of the allocation.
func TestTrailingZero(t *testing.T) {
	// int32 followed by [0]byte: the pad byte plus alignment rounds the
	// size up to 8.
	type T1 struct {
		n int32
		z [0]byte
	}
	if unsafe.Sizeof(T1{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T1{}, unsafe.Sizeof(T1{}))
	}
	// int64 followed by struct{}: the expected size is written as
	// 8+sizeof(uintptr) so the assertion holds on both 32- and 64-bit
	// targets (the pad rounds up to the struct's alignment).
	type T2 struct {
		n int64
		z struct{}
	}
	if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(uintptr(0)) {
		t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(uintptr(0)))
	}
	// byte followed by [4]struct{}: the zero-length array still forces a
	// single trailing pad byte.
	type T3 struct {
		n byte
		z [4]struct{}
	}
	if unsafe.Sizeof(T3{}) != 2 {
		t.Errorf("sizeof(%#v)==%d, want 2", T3{}, unsafe.Sizeof(T3{}))
	}
	// Here ordinary alignment padding (4+2+1 rounded to 8) already covers
	// the zero-sized field, so no extra byte is added.
	type T4 struct {
		a int32
		b int16
		c int8
		z struct{}
	}
	if unsafe.Sizeof(T4{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T4{}, unsafe.Sizeof(T4{}))
	}
	// A completely empty struct stays zero-sized.
	type T5 struct {
	}
	if unsafe.Sizeof(T5{}) != 0 {
		t.Errorf("sizeof(%#v)==%d, want 0", T5{}, unsafe.Sizeof(T5{}))
	}
}
309
// TestAppendGrowth checks the capacity-doubling pattern of append for a
// small []int64: capacity should double each time a power-of-two length
// is exceeded.
func TestAppendGrowth(t *testing.T) {
	var s []int64
	expect := func(wantCap int) {
		if cap(s) != wantCap {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(s), cap(s), wantCap)
		}
	}

	expect(0)
	wantCap := 1
	for n := 1; n <= 100; n++ {
		s = append(s, 1)
		expect(wantCap)
		// At each power-of-two length, the next append should double.
		if n&(n-1) == 0 {
			wantCap = 2 * n
		}
	}
}
328
// One is a single-element slice appended repeatedly by
// TestAppendSliceGrowth; package-level so the append argument is not a
// literal the compiler can special-case.
var One = []int64{1}
330
331 func TestAppendSliceGrowth(t *testing.T) {
332 var x []int64
333 check := func(want int) {
334 if cap(x) != want {
335 t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
336 }
337 }
338
339 check(0)
340 want := 1
341 for i := 1; i <= 100; i++ {
342 x = append(x, One...)
343 check(want)
344 if i&(i-1) == 0 {
345 want = 2 * i
346 }
347 }
348 }
349
// TestGoroutineProfileTrivial checks that a counting call
// (GoroutineProfile(nil)) and a collecting call agree on the number of
// goroutines. Unrelated goroutines may come and go between the two calls,
// so a handful of retries are allowed before declaring failure.
func TestGoroutineProfileTrivial(t *testing.T) {
	for try := 0; ; try++ {
		n1, ok := GoroutineProfile(nil)
		if n1 < 1 || ok {
			t.Fatalf("GoroutineProfile(nil) = %d, %v, want >0, false", n1, ok)
		}
		records := make([]StackRecord, n1)
		n2, ok := GoroutineProfile(records)
		if n2 == n1 && ok {
			return
		}
		t.Logf("GoroutineProfile(%d) = %d, %v, want %d, true", n1, n2, ok, n1)
		if try >= 10 {
			t.Fatalf("GoroutineProfile not converging")
		}
	}
}
370
// BenchmarkGoroutineProfile measures GoroutineProfile in several regimes:
// with few goroutines ("small"), with 5000 extra live goroutines
// ("large"), and just after those goroutines exit ("sparse"), each both
// while idle and under a background GC load. Besides ns/op it reports
// p50/p90/p99 per-call latencies.
func BenchmarkGoroutineProfile(b *testing.B) {
	// run wraps fn in "idle" and "loaded" sub-benchmarks that record the
	// latency of every call and report percentiles.
	run := func(fn func() bool) func(b *testing.B) {
		runOne := func(b *testing.B) {
			latencies := make([]time.Duration, 0, b.N)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				start := time.Now()
				ok := fn()
				if !ok {
					b.Fatal("goroutine profile failed")
				}
				latencies = append(latencies, time.Since(start))
			}
			b.StopTimer()

			// Sort and report percentile latencies.
			slices.Sort(latencies)
			b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
			b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
			b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
		}
		return func(b *testing.B) {
			b.Run("idle", runOne)

			b.Run("loaded", func(b *testing.B) {
				stop := applyGCLoad(b)
				runOne(b)
				// Stop the timer before tearing down the load
				// goroutines so their shutdown is not charged to the
				// benchmark.
				b.StopTimer()
				stop()
			})
		}
	}

	// Counting-only call with the current, small goroutine set.
	b.Run("small-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	// Full profile with the current, small goroutine set; the buffer is
	// oversized (2n + 2*GOMAXPROCS) so the call should not come up short.
	n := NumGoroutine()
	p := make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("small", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))

	// Park 5000 goroutines on a channel to create a large goroutine set.
	ch := make(chan int)
	var ready, done sync.WaitGroup
	for i := 0; i < 5000; i++ {
		ready.Add(1)
		done.Add(1)
		go func() { ready.Done(); <-ch; done.Done() }()
	}
	ready.Wait()

	// Counting-only call with many goroutines present.
	b.Run("large-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	n = NumGoroutine()
	p = make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("large", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))

	// Release the extra goroutines and wait for them to exit, leaving the
	// scheduler's goroutine bookkeeping sparsely populated.
	close(ch)
	done.Wait()

	b.Run("sparse-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	n = NumGoroutine()
	p = make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("sparse", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))
}
462
// TestVersion checks that the runtime version string is a single clean
// line, containing neither carriage returns nor newlines.
func TestVersion(t *testing.T) {
	vers := Version()
	if strings.ContainsAny(vers, "\r\n") {
		t.Fatalf("cr/nl in version: %q", vers)
	}
}
470
// TestTimediv checks the runtime's timediv helper (exported for testing
// as Timediv) against native 64-bit division. Per the expected values
// below, a quotient that does not fit in an int32 saturates to 1<<31-1
// with a remainder of 0.
func TestTimediv(t *testing.T) {
	for _, tc := range []struct {
		num int64 // numerator
		div int32 // divisor
		ret int32 // expected quotient
		rem int32 // expected remainder
	}{
		{
			num: 8,
			div: 2,
			ret: 4,
			rem: 0,
		},
		{
			num: 9,
			div: 2,
			ret: 4,
			rem: 1,
		},
		{
			// Typical use: nanoseconds divided down to seconds.
			num: 12345*1000000000 + 54321,
			div: 1000000000,
			ret: 12345,
			rem: 54321,
		},
		{
			num: 1<<32 - 1,
			div: 2,
			ret: 1<<31 - 1,
			rem: 1,
		},
		{
			// Quotient would be exactly 1<<31, which overflows int32:
			// saturates with rem 0.
			num: 1 << 32,
			div: 2,
			ret: 1<<31 - 1,
			rem: 0,
		},
		{
			num: 1 << 40,
			div: 2,
			ret: 1<<31 - 1,
			rem: 0,
		},
		{
			// Largest quotient that still fits: exactly 1<<30.
			num: 1<<40 + 1,
			div: 1 << 10,
			ret: 1 << 30,
			rem: 1,
		},
	} {
		name := fmt.Sprintf("%d div %d", tc.num, tc.div)
		t.Run(name, func(t *testing.T) {
			// Sanity-check the table entry against native 64-bit
			// division with the same saturation rule applied.
			ret64 := tc.num / int64(tc.div)
			rem64 := tc.num % int64(tc.div)
			if ret64 != int64(int32(ret64)) {
				// Quotient overflows int32: saturate.
				ret64 = 1<<31 - 1
				rem64 = 0
			}
			if ret64 != int64(tc.ret) {
				t.Errorf("%d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret64, rem64, tc.ret, tc.rem)
			}

			// Now check the runtime implementation itself.
			var rem int32
			ret := Timediv(tc.num, tc.div, &rem)
			if ret != tc.ret || rem != tc.rem {
				t.Errorf("timediv %d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret, rem, tc.ret, tc.rem)
			}
		})
	}
}
545
546 func BenchmarkProcYield(b *testing.B) {
547 benchN := func(n uint32) func(*testing.B) {
548 return func(b *testing.B) {
549 for i := 0; i < b.N; i++ {
550 ProcYield(n)
551 }
552 }
553 }
554
555 b.Run("1", benchN(1))
556 b.Run("10", benchN(10))
557 b.Run("30", benchN(30))
558 b.Run("100", benchN(100))
559 b.Run("1000", benchN(1000))
560 }
561
562 func BenchmarkOSYield(b *testing.B) {
563 for i := 0; i < b.N; i++ {
564 OSYield()
565 }
566 }
567
// BenchmarkMutexContention measures the runtime-internal mutex under full
// contention: GOMAXPROCS goroutines doing nothing but Lock/Unlock around
// a shared counter increment. The CacheLinePad fields keep the lock word
// and the counter on separate cache lines so false sharing does not
// distort the measurement.
func BenchmarkMutexContention(b *testing.B) {
	var state struct {
		_     cpu.CacheLinePad
		lock  Mutex
		_     cpu.CacheLinePad
		count atomic.Int64
		_     cpu.CacheLinePad
	}

	procs := GOMAXPROCS(0)
	var wg sync.WaitGroup
	for range procs {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				Lock(&state.lock)
				ours := state.count.Add(1)
				Unlock(&state.lock)
				// The counter doubles as the termination condition:
				// collectively the workers perform b.N acquisitions.
				if ours >= int64(b.N) {
					return
				}
			}
		}()
	}
	wg.Wait()
}
609
// BenchmarkMutexCapture is like BenchmarkMutexContention but also tracks
// how fairly the lock circulates. Each worker records, in power-of-two
// histogram buckets, (a) how many times in a row it reacquired the lock
// with no intervening owner (a "streak") and (b) how many acquisitions by
// other workers passed between its own turns (a "starve" gap). Results
// are reported as approximate p90/p100 durations scaled by ns/op.
func BenchmarkMutexCapture(b *testing.B) {
	var state struct {
		_     cpu.CacheLinePad
		lock  Mutex
		_     cpu.CacheLinePad
		count atomic.Int64
		_     cpu.CacheLinePad
	}

	procs := GOMAXPROCS(0)
	var wg sync.WaitGroup
	histograms := make(chan [2][65]int)
	for range procs {
		wg.Add(1)
		go func() {
			var (
				prev      int64
				streak    int64
				histogram [2][65]int // [0]=streak lengths, [1]=starvation gaps
			)
			for {
				Lock(&state.lock)
				ours := state.count.Add(1)
				Unlock(&state.lock)
				// delta is how many other acquisitions happened since
				// this worker's previous turn.
				delta := ours - prev - 1
				prev = ours
				if delta == 0 {
					streak++
				} else {
					// Streak ended: bucket both values by magnitude
					// (LeadingZeros64 yields a log2 bucket index).
					histogram[0][bits.LeadingZeros64(uint64(streak))]++
					histogram[1][bits.LeadingZeros64(uint64(delta))]++
					streak = 1
				}
				if ours >= int64(b.N) {
					wg.Done()
					if delta == 0 {
						// Flush the still-open streak before reporting.
						histogram[0][bits.LeadingZeros64(uint64(streak))]++
						histogram[1][bits.LeadingZeros64(uint64(delta))]++
					}
					histograms <- histogram
					return
				}
			}
		}()
	}

	wg.Wait()
	b.StopTimer()

	// Merge the per-worker histograms.
	var histogram [2][65]int
	for range procs {
		h := <-histograms
		for i := range h {
			for j := range h[i] {
				histogram[i][j] += h[i][j]
			}
		}
	}

	// percentile returns the approximate p'th percentile of a
	// log2-bucketed histogram, weighting each bucket by its upper bound.
	percentile := func(h [65]int, p float64) int {
		sum := 0
		for i, v := range h {
			bound := uint64(1<<63) >> i
			sum += int(bound) * v
		}

		// Walk buckets from largest to smallest and stop once the
		// remaining tail is within the requested fraction of the total.
		part := 0
		for i, v := range h {
			bound := uint64(1<<63) >> i
			part += int(bound) * v
			if float64(sum-part) < float64(sum)*p {
				return int(bound)
			}
		}

		return 0
	}

	// Scale bucket values (counts of lock acquisitions) by the measured
	// time per acquisition to report durations.
	perOp := float64(b.Elapsed().Nanoseconds()) / float64(b.N)
	b.ReportMetric(perOp*float64(percentile(histogram[0], 1.0)), "ns/streak-p100")
	b.ReportMetric(perOp*float64(percentile(histogram[0], 0.9)), "ns/streak-p90")
	b.ReportMetric(perOp*float64(percentile(histogram[1], 1.0)), "ns/starve-p100")
	b.ReportMetric(perOp*float64(percentile(histogram[1], 0.9)), "ns/starve-p90")
}
714
// BenchmarkMutexHandoff measures the latency of handing a runtime mutex
// from one goroutine to another: "FastPingPong" with the receiver
// actively spinning, and "SlowPingPong" after the receiver has had time
// to block on the lock. "Solo" is the uncontended Lock/Unlock baseline.
func BenchmarkMutexHandoff(b *testing.B) {
	testcase := func(delay func(l *Mutex)) func(b *testing.B) {
		return func(b *testing.B) {
			if workers := 2; GOMAXPROCS(0) < workers {
				b.Skipf("requires GOMAXPROCS >= %d", workers)
			}

			// state.turn coordinates strict alternation: each Add(2)
			// toggles bit 1 ("partner's turn"), and the final Add(1)
			// sets bit 0 to tell the receiver to exit.
			var state struct {
				_    cpu.CacheLinePad
				lock Mutex
				_    cpu.CacheLinePad
				turn atomic.Int64
				_    cpu.CacheLinePad
			}

			// delta accumulates (receiver's acquire timestamp) minus
			// (sender's release timestamp), i.e. total handoff time,
			// reported below as ns/op.
			var delta atomic.Int64
			var wg sync.WaitGroup

			// Sender: takes the lock, runs the testcase's delay (which
			// for SlowPingPong waits for the receiver to block), records
			// the release time, then spins until the receiver has taken
			// its turn.
			wg.Add(1)
			go func() {
				defer wg.Done()
				var t int64
				for range b.N {
					Lock(&state.lock)
					state.turn.Add(2)
					delay(&state.lock)
					t -= Nanotime()
					Unlock(&state.lock)
					for state.turn.Load()&0x2 != 0 {
					}
				}
				state.turn.Add(1) // signal the receiver to exit
				delta.Add(t)
			}()

			// Receiver: when it's this goroutine's turn (bit 1 set),
			// acquire the lock, record the acquire time, and hand the
			// turn back.
			wg.Add(1)
			go func() {
				defer wg.Done()
				var t int64
				for {
					switch state.turn.Load() & 0x3 {
					case 0:
						// Not our turn yet; keep spinning.
					case 1, 3:
						// Exit flag set by the sender.
						delta.Add(t)
						return
					case 2:
						Lock(&state.lock)
						t += Nanotime()
						Unlock(&state.lock)
						state.turn.Add(2)
					}
				}
			}()

			wg.Wait()
			b.ReportMetric(float64(delta.Load())/float64(b.N), "ns/op")
		}
	}

	// Baseline: uncontended lock/unlock on a single goroutine.
	b.Run("Solo", func(b *testing.B) {
		var lock Mutex
		for range b.N {
			Lock(&lock)
			Unlock(&lock)
		}
	})

	b.Run("FastPingPong", testcase(func(l *Mutex) {}))
	b.Run("SlowPingPong", testcase(func(l *Mutex) {
		// Wait until the partner is queued on the lock...
		for !MutexContended(l) {
		}
		// ...then hold on a little longer so it can go to sleep.
		const extraNs = 10e3
		for t0 := Nanotime(); Nanotime()-t0 < extraNs; {
		}
	}))
}
822
// (End of file; "View as plain text" web-viewer artifact removed.)