Source file
src/runtime/gc_test.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/asan"
	"internal/testenv"
	"internal/weak"
	"math/bits"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis:
	// logging a's address forces the allocation onto the heap.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcMapIndirection(t *testing.T) {
	// Run with GC percent 1 so collections trigger frequently while
	// the map grows; the previous setting is restored on exit.
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

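// The defer above uses an idiom that recurs throughout this file:
// debug.SetGCPercent returns the previous setting, so nesting the two
// calls saves and restores it in one line. A minimal sketch (100 is an
// arbitrary example value):
//
//	defer debug.SetGCPercent(debug.SetGCPercent(100))
//	// body runs with GOGC=100; the old value is restored on return
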
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

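// MemStats.PauseNs is a circular buffer of recent GC pause times, with
// the most recent pause at PauseNs[(NumGC+255)%256] -- the index used
// above. For example, after NumGC == 3 cycles the pauses occupy
// indices 0, 1, and 2, and (3+255)%256 == 2 picks out the latest one.
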
var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can parse GC info of huge types.
	if hugeSink != nil {
		// The condition above is never true: the allocations below
		// only need to type-check and compile, not actually run.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these GCs might be delayed, so give the loop
	// an ample number of retries before giving up.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Check whether periodic GCs have actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	// Pointers in the zombies slice may cross spans, so run with
	// GODEBUG=invalidptr=0 to keep the bad-pointer check from
	// firing before the zombie check can report the free object.
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	if asan.Enabled {
		t.Skip("extra allocations with -asan causes this to fail; see #70079")
	}
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

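// A sketch of the invariant the test above exercises: a raw address of
// a stack variable recorded as a uintptr goes stale when the stack
// moves, while real pointers are updated by the runtime. (grow is a
// placeholder for any function call.)
//
//	var x int
//	old := uintptr(unsafe.Pointer(&x)) // raw address; the GC won't fix it up
//	runtime.GCTestMoveStackOnNextCall()
//	grow() // stack moves on this call: &x changes, old does not
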
// moveStackCheck reports whether the stack frame holding new moved
// since the caller recorded old. It must not be inlined, because the
// stack move set up by GCTestMoveStackOnNextCall triggers on the next
// function call.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// If the stack moved, new was updated to point at the variable's
	// new location, so comparing it against the old raw address tells
	// us whether the move happened. Going through unsafe.Pointer and
	// uintptr keeps the comparison invisible to the GC.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that new is still a stack pointer, i.e. the test
		// didn't accidentally let it escape to the heap.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// This was a real failure.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Check that GCTestMoveStackOnNextCall can be called
	// multiple times.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// Create a pointer for each bit of the mask, keeping only
		// the even-indexed ones reachable via half.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if got&want != want {
		// This is a serious bug - an object is live (due to the
		// KeepAlive call below), but wasn't reported as reachable.
		t.Fatalf("live object not in reachable set; want %b, got %b", want, got)
	}
	if bits.OnesCount64(got&^want) > 1 {
		// Note: a value can occasionally be retained even though
		// it isn't live, e.g. due to conservative scanning of a
		// stack frame or register that briefly held a dead
		// pointer. Hence the "> 1" tolerance here.
		t.Fatalf("dead object in reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

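// GCTestIsReachable reports reachability as a bitmask: bit i of the
// result is set iff the i'th argument was found reachable. With 16
// pointers and every even-indexed one kept alive, want above works out
// to 0b0101010101010101, and the checks tolerate at most one stray
// extra bit for a conservatively-retained dead object.
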
var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	if asan.Enabled {
		t.Skip("extra allocations cause this test to fail; see #70079")
	}
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to a uintptr for the message so
			// formatting the argument doesn't force p to escape.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

// The result is stored in a global so the interface comparison below
// can't be optimized away.
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// mheap_.pagesInUse feeds into GC pacing decisions, so check that
	// the runtime's running counter agrees with a direct count of
	// in-use pages. Allocate enough spans to make it interesting.
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Compare the running counter against a fresh count.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func init() {
	// Enable ReadMemStats' double-check mode.
	*runtime.DoubleCheckReadMemStats = true
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

// logDiff logs every leaf field where got and want differ, walking
// arrays, slices, and structs recursively; prefix names the value
// being compared.
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

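// logDiff recurses structurally, so it works on any struct with
// comparable leaf fields, not just MemStats. A small hypothetical
// example of its output:
//
//	type pair struct{ A, B int }
//	logDiff(t, "pair", reflect.ValueOf(pair{1, 2}), reflect.ValueOf(pair{1, 3}))
//	// logs: pair.B: got 2, want 3
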
func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

// applyGCLoad starts background goroutines that allocate heavily to
// keep the GC busy, and returns a function that stops them.
func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this with only 1 P (that's what
	// BenchmarkReadMemStats is for).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Set up the load: a pointer-heavy tree that's cheap to rebuild.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Apply the load: each worker rebuilds the tree until stopped.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Record all the latencies so we can compute percentiles later.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats periodically and measure how long each call
	// takes while the GC load runs in the background.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}

	// Make sure to stop the timer before we wait! The load created
	// above is very heavy-weight and not easy to stop, so we could
	// end up confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics. ns/op doesn't mean anything
	// here because it's an average, and the sleep in the loop above
	// skews it significantly anyway.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort the latencies and report percentiles.
	slices.Sort(latencies)
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even when GC is disabled.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

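// The two counters checked above come from runtime.MemStats: NumGC
// counts all completed GC cycles, while NumForcedGC counts only those
// the application forced via runtime.GC(). A minimal sketch of the
// accounting (hypothetical snippet):
//
//	var ms runtime.MemStats
//	runtime.ReadMemStats(&ms)
//	before := ms.NumForcedGC
//	runtime.GC()
//	runtime.ReadMemStats(&ms)
//	delta := ms.NumForcedGC - before // expected >= 1
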
// writeBarrierBenchmark runs f while a background goroutine cycles the
// GC continuously, which keeps the write barrier enabled throughout.
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure whose pointers we can mutate.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it doesn't
			// start termination barriers when it reaches the
			// bottom of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree: swap the
			// children (two pointer writes) and descend, or pop
			// when we hit a leaf.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate the block left by one: a single bulk copy of
			// pointer slots, which exercises the bulk write barrier.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

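// The sub-benchmark names above are in mark bits, not bytes: an n-byte
// gcBits buffer covers n*8 objects in a span. For example, the
// bits=1024 case hands MSpanCountAlloc a 128-byte buffer, i.e. a span
// with 1024 mark bits to count.
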
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}

func TestWeakToStrongMarkTermination(t *testing.T) {
	testenv.MustHaveParallelism(t)

	type T struct {
		a *int
		b int
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	w := make([]weak.Pointer[T], 2048)

	// Make sure there's no outstanding GC from a previous test.
	runtime.GC()

	// Create many objects with a weak pointer to each of them.
	for i := range w {
		x := new(T)
		x.a = new(int)
		w[i] = weak.Make(x)
	}

	// Reset the restart flag.
	runtime.GCMarkDoneResetRestartFlag()

	// Prevent mark termination from completing.
	runtime.SetSpinInGCMarkDone(true)

	// Start a GC, and wait a little bit to get something spinning in mark
	// termination. Simultaneously, fire off another goroutine to disable
	// spinning. If everything's working correctly, then weak.Strong will
	// block, so we need to make sure something prevents the GC from
	// continuing to spin.
	done := make(chan struct{})
	go func() {
		runtime.GC()
		done <- struct{}{}
	}()
	go func() {
		time.Sleep(100 * time.Millisecond)

		// Let mark termination continue.
		runtime.SetSpinInGCMarkDone(false)
	}()
	time.Sleep(10 * time.Millisecond)

	// Perform many weak->strong conversions in the critical window.
	var wg sync.WaitGroup
	for _, wp := range w {
		wg.Add(1)
		go func() {
			defer wg.Done()
			wp.Strong()
		}()
	}

	// Make sure the GC completes.
	<-done

	// Make sure all the weak->strong conversions finish.
	wg.Wait()

	// The bug is triggered if there's still mark work after gcMarkDone
	// stops the world.
	//
	// This can manifest in one of two ways today:
	// - An exceedingly rare crash in mark termination.
	// - gcMarkDone restarts, as if new mark work was discovered.
	//
	// Check for the latter. This is a fairly controlled environment, so
	// a restart is a reliable failure signal.
	if runtime.GCMarkDoneRestarted() {
		t.Errorf("gcMarkDone restarted")
	}
}
