Source file: src/runtime/proc_test.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/race"
	"internal/testenv"
	"math"
	"net"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
)

var stop = make(chan bool, 1)

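// perpetuumMobile schedules a replacement copy of itself each time it runs,
// so there is always a runnable goroutine until stop is signaled.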
func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}

func TestStopTheWorldDeadlock(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}

func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}

func testYieldProgress(locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// may start a new mark phase before then. So, wait until the mark
	// phase is done.
	runtime.GC()
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

// Test that all runnable goroutines are scheduled at the same time eventually.
func TestGoroutineParallelism2(t *testing.T) {
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// may start a new mark phase before then. So, wait until the mark
	// phase is done.
	runtime.GC()
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will be busy spinning.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, affects scheduler behavior.
			laddr := "localhost:0"
			if runtime.GOOS == "android" {
				// On some Android devices "localhost" may not resolve
				// as expected, so listen on 127.0.0.1 explicitly
				// instead.
				laddr = "127.0.0.1:0"
			}
			ln, err := net.Listen("tcp", laddr)
			if err == nil {
				defer ln.Close()
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

func TestTimerFairness(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

func TestTimerFairness2(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}

func TestPreemptionGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

func TestAsyncPreempt(t *testing.T) {
	if !runtime.PreemptMSupported {
		t.Skip("asynchronous preemption not supported on this platform")
	}
	output := runTestProg(t, "testprog", "AsyncPreempt")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The goal is to only report a persistent mismatch, not one
		// caused by a goroutine that is still in the process of
		// exiting when the stack dump is taken.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		// Strip "in goroutine N" annotations so they are not counted
		// as additional "goroutine " headers below.
		output := strings.ReplaceAll(string(buf), "in goroutine", "")
		nstk := strings.Count(output, "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}

func TestPingPongHog(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	if race.Enabled {
		// The race detector randomizes the scheduler,
		// which causes this test to fail (#38266).
		t.Skip("skipping in -race mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of 20, which
	// indicates that both pairs of goroutines handed off the P within a
	// time-slice to their buddy. We can use a fairly large factor here to
	// make this robust: if the scheduler isn't working right, the gap
	// should be several orders of magnitude rather than a small constant
	// factor.
	const factor = 20
	if hogCount/factor > lightCount || lightCount/factor > hogCount {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog.
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times.
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}

var padData [128]uint64

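// stackGrowthRecursive recurses to the requested depth, allocating a
// 128-word frame at each level to force repeated stack growth.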
func stackGrowthRecursive(i int) {
	var pad [128]uint64
	pad = padData
	for j := range pad {
		if pad[j] != 0 {
			return
		}
	}
	if i != 0 {
		stackGrowthRecursive(i - 1)
	}
}

func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

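// big spins in long, call-free loops and periodically calls into a function
// with a large frame; TestPreemptSplitBig uses it to check that such a
// goroutine can still be preempted for GC.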
func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}

		// call bigframe, which used to miss the preemption in its prologue
		bigframe(stop)

		// check if we've been asked to stop
		select {
		case <-stop:
			return n
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

621 runtime.RunSchedLocalQueueTest()
622 }
623
624 func TestSchedLocalQueueSteal(t *testing.T) {
625 runtime.RunSchedLocalQueueStealTest()
626 }
627
628 func TestSchedLocalQueueEmpty(t *testing.T) {
629 if runtime.NumCPU() == 1 {
630
631 t.Skip("skipping on uniprocessor")
632 }
633 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
634
635
636
637 defer debug.SetGCPercent(debug.SetGCPercent(-1))
638
639
640
641 runtime.GC()
642
643 iters := int(1e5)
644 if testing.Short() {
645 iters = 1e2
646 }
647 runtime.RunSchedLocalQueueEmptyTest(iters)
648 }
649
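// benchmarkStackGrowth measures stack growth by running stackGrowthRecursive
// to the given recursion depth in parallel across GOMAXPROCS goroutines.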
func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad")
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(time.Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

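// doWork busy-loops for the given duration without yielding.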
func doWork(dur time.Duration) {
	start := time.Now()
	for time.Since(start) < dur {
	}
}

// BenchmarkCreateGoroutinesSingle creates many goroutines, all from a single
// producer (the main benchmark goroutine).
//
// Compared to BenchmarkCreateGoroutines, this causes different behavior in the
// scheduler because Ms are much more likely to need to steal work from the
// main P rather than having work in the local run queue.
func BenchmarkCreateGoroutinesSingle(b *testing.B) {
	// Since we are interested in stealing behavior, warm up the scheduler to
	// get all the Ps running first.
	warmupScheduler(runtime.GOMAXPROCS(0))
	b.ResetTimer()

	var wg sync.WaitGroup
	wg.Add(b.N)
	for i := 0; i < b.N; i++ {
		go func() {
			wg.Done()
		}()
	}
	wg.Wait()
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			// blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			// sends to the same channel. (The addition of
			// `wakeDelay` improves the probability that the
			// receiver is blocking when the send occurs when the
			// goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}

func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {
			// do nothing
		}
	})
}

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)

func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {
		sysNanosleep(d)
	})
}

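// Matrix is a matrix of float64 values, stored as a slice of rows.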
type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

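// makeMatrix returns an n×n Matrix filled with distinct values.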
func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}

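// matmult computes C += A*B over the sub-ranges [i0,i1), [j0,j1), [k0,k1),
// recursively splitting the largest dimension and running the two halves in
// parallel (except along k, to avoid data races) until each block is smaller
// than threshold, at which point it multiplies directly.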
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by z axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}

func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}

func TestLockOSThreadNesting(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no threads on wasm yet")
	}

	go func() {
		e, i := runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
		runtime.LockOSThread()
		runtime.LockOSThread()
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 1 || i != 0 {
			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
			return
		}
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
	}()
}

func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
	want := "OK\n"
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}

	output = runTestProg(t, prog, "LockOSThreadAlt")
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
	want := "OK\n"
	skip := "unshare not permitted\n"
	output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
	if output == skip {
		t.Skip("unshare syscall not permitted on this system")
	} else if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadTemplateThreadRace(t *testing.T) {
	testenv.MustHaveGoRun(t)

	exe, err := buildTestProg(t, "testprog")
	if err != nil {
		t.Fatal(err)
	}

	iterations := 100
	if testing.Short() {
		// Reduce run time to ~100ms, with much lower probability of
		// catching issues.
		iterations = 5
	}
	for i := 0; i < iterations; i++ {
		want := "OK\n"
		output := runBuiltTestProg(t, exe, "LockOSThreadTemplateThreadRace")
		if output != want {
			t.Fatalf("run %d: want %q, got %q", i, want, output)
		}
	}
}

// fakeSyscall emulates a system call by entering and exiting syscall mode
// around a busy-wait of the given duration.
func fakeSyscall(duration time.Duration) {
	runtime.Entersyscall()
	for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {
	}
	runtime.Exitsyscall()
}

// Check that a goroutine will be preempted if it is calling short system calls.
func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	iterations := 10
	if testing.Short() {
		iterations = 1
	}
	const (
		maxDuration = 5 * time.Second
		nroutines   = 8
	)

	for i := 0; i < iterations; i++ {
		c := make(chan bool, nroutines)
		stop := uint32(0)

		start := time.Now()
		for g := 0; g < nroutines; g++ {
			go func(stop *uint32) {
				c <- true
				for atomic.LoadUint32(stop) == 0 {
					fakeSyscall(syscallDuration)
				}
				c <- true
			}(&stop)
		}
		// Wait until all goroutines have started.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		atomic.StoreUint32(&stop, 1)
		// Wait until all goroutines have finished.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		duration := time.Since(start)

		if duration > maxDuration {
			t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration)
		}
	}
}

func TestPreemptionAfterSyscall(t *testing.T) {
	if runtime.GOOS == "plan9" {
		testenv.SkipFlaky(t, 41015)
	}

	for _, i := range []time.Duration{10, 100, 1000} {
		d := i * time.Microsecond
		t.Run(fmt.Sprint(d), func(t *testing.T) {
			testPreemptionAfterSyscall(t, d)
		})
	}
}

func TestGetgThreadSwitch(t *testing.T) {
	runtime.RunGetgThreadSwitchTest()
}

// TestNetpollBreak tests that netpollBreak can break a netpoll.
// This test is not particularly safe since the call to netpoll
// will pick up any stray files that are ready, but it should work
// OK as long as it is not run in parallel.
func TestNetpollBreak(t *testing.T) {
	if runtime.GOMAXPROCS(0) == 1 {
		t.Skip("skipping: GOMAXPROCS=1")
	}

	// Make sure that netpoll is initialized.
	runtime.NetpollGenericInit()

	start := time.Now()
	c := make(chan bool, 2)
	go func() {
		c <- true
		runtime.Netpoll(10 * time.Second.Nanoseconds())
		c <- true
	}()
	<-c

	// Loop because the break might get eaten by the scheduler.
	// Break twice to break both the netpoll we started and the
	// scheduler's own netpoll.
loop:
	for {
		runtime.Usleep(100)
		runtime.NetpollBreak()
		runtime.NetpollBreak()
		select {
		case <-c:
			break loop
		default:
		}
	}
	if dur := time.Since(start); dur > 5*time.Second {
		t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur)
	}
}

// TestBigGOMAXPROCS tests that setting GOMAXPROCS to a large value
// does not cause a crash.
func TestBigGOMAXPROCS(t *testing.T) {
	t.Parallel()
	output := runTestProg(t, "testprog", "NonexistentTest", "GOMAXPROCS=1024")
	// If this machine can't create 1024 threads, skip rather than fail.
	for _, errstr := range []string{
		"failed to create new OS thread",
		"cannot allocate memory",
	} {
		if strings.Contains(output, errstr) {
			t.Skipf("failed to create 1024 threads")
		}
	}
	if !strings.Contains(output, "unknown function: NonexistentTest") {
		t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output)
	}
}