Source file
src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/strconv"
15 "internal/runtime/sys"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
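// modinfo holds the module build information embedded by cmd/go.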
21 var modinfo string
22
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.
//
// Worker threads are parked when there is no work to do and unparked when
// new work arrives, trading a small amount of spinning for lower latency
// when goroutines become runnable.
117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
127 var runtime_inittasks []*initTask
128
129
130
131
132
133 var main_init_done chan bool
134
135
136 func main_main()
137
138
139 var mainStarted bool
140
141
142 var runtimeInitTime int64
143
144
145 var initSigmask sigset
146
147
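// The main goroutine.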
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
154
155
156
157
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
164
165
166
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
178
179
180
181
182
183
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247
248
249
250
251
252
253
254
255 last := lastmoduledatap
256 for m := &firstmoduledata; true; m = m.next {
257 doInit(m.inittasks)
258 if m == last {
259 break
260 }
261 }
262
263
264
265 inittrace.active = false
266
267 close(main_init_done)
268
269 needUnlock = false
270 unlockOSThread()
271
272 if isarchive || islibrary {
273
274
275 if GOARCH == "wasm" {
276
277
278
279
280
281
282
283 pause(sys.GetCallerSP() - 16)
284 panic("unreachable")
285 }
286 return
287 }
288 fn := main_main
289 fn()
290
291 exitHooksRun := false
292 if raceenabled {
293 runExitHooks(0)
294 exitHooksRun = true
295 racefini()
296 }
297
298
299
300
301
302
303
304
305 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
306 runExitHooks(0)
307 exitHooksRun = true
308 lsandoleakcheck()
309 }
310
311
312
313
314
315 if runningPanicDefers.Load() != 0 {
316
317 for c := 0; c < 1000; c++ {
318 if runningPanicDefers.Load() == 0 {
319 break
320 }
321 Gosched()
322 }
323 }
324 if panicking.Load() != 0 {
325 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
326 }
327 if !exitHooksRun {
328 runExitHooks(0)
329 }
330
331 exit(0)
332 for {
333 var x *int32
334 *x = 0
335 }
336 }
337
338
339
340
341 func os_beforeExit(exitCode int) {
342 runExitHooks(exitCode)
343 if exitCode == 0 && raceenabled {
344 racefini()
345 }
346
347
348 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
349 lsandoleakcheck()
350 }
351 }
352
353 func init() {
354 exithook.Gosched = Gosched
355 exithook.Goid = func() uint64 { return getg().goid }
356 exithook.Throw = throw
357 }
358
359 func runExitHooks(code int) {
360 exithook.Run(code)
361 }
362
363
364 func init() {
365 go forcegchelper()
366 }
367
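// forcegchelper is the goroutine that performs forced (periodic) garbage
// collections. It parks on forcegc.lock and is woken by sysmon when a GC
// is overdue, then starts a time-triggered GC cycle.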
368 func forcegchelper() {
369 forcegc.g = getg()
370 lockInit(&forcegc.lock, lockRankForcegc)
371 for {
372 lock(&forcegc.lock)
373 if forcegc.idle.Load() {
374 throw("forcegc: phase error")
375 }
376 forcegc.idle.Store(true)
377 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
378
379 if debug.gctrace > 0 {
380 println("GC forced")
381 }
382
383 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
384 }
385 }
386
387
388
389
390
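// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.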
391 func Gosched() {
392 checkTimeouts()
393 mcall(gosched_m)
394 }
395
396
397
398
399
400 func goschedguarded() {
401 mcall(goschedguarded_m)
402 }
403
404
405
406
407
408
409 func goschedIfBusy() {
410 gp := getg()
411
412
413 if !gp.preempt && sched.npidle.Load() > 0 {
414 return
415 }
416 mcall(gosched_m)
417 }
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
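// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack. If unlockf returns false, the goroutine is resumed. The
// goroutine can be made runnable again by calling goready(gp). reason explains
// why the goroutine is parked and appears in stack traces and heap dumps.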
447 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
448 if reason != waitReasonSleep {
449 checkTimeouts()
450 }
451 mp := acquirem()
452 gp := mp.curg
453 status := readgstatus(gp)
454 if status != _Grunning && status != _Gscanrunning {
455 throw("gopark: bad g status")
456 }
457 mp.waitlock = lock
458 mp.waitunlockf = unlockf
459 gp.waitreason = reason
460 mp.waitTraceBlockReason = traceReason
461 mp.waitTraceSkip = traceskip
462 releasem(mp)
463
464 mcall(park_m)
465 }
466
467
468
469 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
470 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
471 }
472
473
474
475
476
477
478
479
480
481
482
483 func goready(gp *g, traceskip int) {
484 systemstack(func() {
485 ready(gp, traceskip, true)
486 })
487 }
488
489
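// acquireSudog returns a sudog for the current goroutine, taking one from the
// per-P cache and refilling half the cache from the central pool (or allocating
// a new sudog) when the cache is empty.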
490 func acquireSudog() *sudog {
491
492
493
494
495
496
497
498
499 mp := acquirem()
500 pp := mp.p.ptr()
501 if len(pp.sudogcache) == 0 {
502 lock(&sched.sudoglock)
503
504 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
505 s := sched.sudogcache
506 sched.sudogcache = s.next
507 s.next = nil
508 pp.sudogcache = append(pp.sudogcache, s)
509 }
510 unlock(&sched.sudoglock)
511
512 if len(pp.sudogcache) == 0 {
513 pp.sudogcache = append(pp.sudogcache, new(sudog))
514 }
515 }
516 n := len(pp.sudogcache)
517 s := pp.sudogcache[n-1]
518 pp.sudogcache[n-1] = nil
519 pp.sudogcache = pp.sudogcache[:n-1]
520 if s.elem != nil {
521 throw("acquireSudog: found s.elem != nil in cache")
522 }
523 releasem(mp)
524 return s
525 }
526
527
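// releaseSudog returns s to the per-P sudog cache, first flushing half of a
// full cache to the central pool.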
528 func releaseSudog(s *sudog) {
529 if s.elem != nil {
530 throw("runtime: sudog with non-nil elem")
531 }
532 if s.isSelect {
533 throw("runtime: sudog with non-false isSelect")
534 }
535 if s.next != nil {
536 throw("runtime: sudog with non-nil next")
537 }
538 if s.prev != nil {
539 throw("runtime: sudog with non-nil prev")
540 }
541 if s.waitlink != nil {
542 throw("runtime: sudog with non-nil waitlink")
543 }
544 if s.c != nil {
545 throw("runtime: sudog with non-nil c")
546 }
547 gp := getg()
548 if gp.param != nil {
549 throw("runtime: releaseSudog with non-nil gp.param")
550 }
551 mp := acquirem()
552 pp := mp.p.ptr()
553 if len(pp.sudogcache) == cap(pp.sudogcache) {
554
555 var first, last *sudog
556 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
557 n := len(pp.sudogcache)
558 p := pp.sudogcache[n-1]
559 pp.sudogcache[n-1] = nil
560 pp.sudogcache = pp.sudogcache[:n-1]
561 if first == nil {
562 first = p
563 } else {
564 last.next = p
565 }
566 last = p
567 }
568 lock(&sched.sudoglock)
569 last.next = sched.sudogcache
570 sched.sudogcache = first
571 unlock(&sched.sudoglock)
572 }
573 pp.sudogcache = append(pp.sudogcache, s)
574 releasem(mp)
575 }
576
577
578 func badmcall(fn func(*g)) {
579 throw("runtime: mcall called on m->g0 stack")
580 }
581
582 func badmcall2(fn func(*g)) {
583 throw("runtime: mcall function returned")
584 }
585
586 func badreflectcall() {
587 panic(plainError("arg size to reflect.call more than 1GB"))
588 }
589
590
591
592 func badmorestackg0() {
593 if !crashStackImplemented {
594 writeErrStr("fatal: morestack on g0\n")
595 return
596 }
597
598 g := getg()
599 switchToCrashStack(func() {
600 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
601 g.m.traceback = 2
602 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
603 print("\n")
604
605 throw("morestack on g0")
606 })
607 }
608
609
610
611 func badmorestackgsignal() {
612 writeErrStr("fatal: morestack on gsignal\n")
613 }
614
615
616 func badctxt() {
617 throw("ctxt != 0")
618 }
619
620
621
622 var gcrash g
623
624 var crashingG atomic.Pointer[g]
625
626
627
628
629
630
631
632
633
634 func switchToCrashStack(fn func()) {
635 me := getg()
636 if crashingG.CompareAndSwapNoWB(nil, me) {
637 switchToCrashStack0(fn)
638 abort()
639 }
640 if crashingG.Load() == me {
641
642 writeErrStr("fatal: recursive switchToCrashStack\n")
643 abort()
644 }
645
646 usleep_no_g(100)
647 writeErrStr("fatal: concurrent switchToCrashStack\n")
648 abort()
649 }
650
651
652
653
654 const crashStackImplemented = GOOS != "windows"
655
656
657 func switchToCrashStack0(fn func())
658
659 func lockedOSThread() bool {
660 gp := getg()
661 return gp.lockedm != 0 && gp.m.lockedg != 0
662 }
663
664 var (
665
666
667
668
669
670
671 allglock mutex
672 allgs []*g
673
674
675
676
677
678
679
680
681
682
683
684
685
686 allglen uintptr
687 allgptr **g
688 )
689
690 func allgadd(gp *g) {
691 if readgstatus(gp) == _Gidle {
692 throw("allgadd: bad status Gidle")
693 }
694
695 lock(&allglock)
696 allgs = append(allgs, gp)
697 if &allgs[0] != allgptr {
698 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
699 }
700 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
701 unlock(&allglock)
702 }
703
704
705
706
707 func allGsSnapshot() []*g {
708 assertWorldStoppedOrLockHeld(&allglock)
709
710
711
712
713
714
715 return allgs[:len(allgs):len(allgs)]
716 }
717
718
719 func atomicAllG() (**g, uintptr) {
720 length := atomic.Loaduintptr(&allglen)
721 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
722 return ptr, length
723 }
724
725
726 func atomicAllGIndex(ptr **g, i uintptr) *g {
727 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
728 }
729
730
731
732
733 func forEachG(fn func(gp *g)) {
734 lock(&allglock)
735 for _, gp := range allgs {
736 fn(gp)
737 }
738 unlock(&allglock)
739 }
740
741
742
743
744
745 func forEachGRace(fn func(gp *g)) {
746 ptr, length := atomicAllG()
747 for i := uintptr(0); i < length; i++ {
748 gp := atomicAllGIndex(ptr, i)
749 fn(gp)
750 }
751 return
752 }
753
754 const (
755
756
757 _GoidCacheBatch = 16
758 )
759
760
761
762 func cpuinit(env string) {
763 cpu.Initialize(env)
764
765
766
767 switch GOARCH {
768 case "386", "amd64":
769 x86HasPOPCNT = cpu.X86.HasPOPCNT
770 x86HasSSE41 = cpu.X86.HasSSE41
771 x86HasFMA = cpu.X86.HasFMA
772
773 case "arm":
774 armHasVFPv4 = cpu.ARM.HasVFPv4
775
776 case "arm64":
777 arm64HasATOMICS = cpu.ARM64.HasATOMICS
778
779 case "loong64":
780 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
781 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
782 loong64HasLSX = cpu.Loong64.HasLSX
783
784 case "riscv64":
785 riscv64HasZbb = cpu.RISCV64.HasZbb
786 }
787 }
788
789
790
791
792 func getGodebugEarly() string {
793 const prefix = "GODEBUG="
794 var env string
795 switch GOOS {
796 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
797
798
799
800 n := int32(0)
801 for argv_index(argv, argc+1+n) != nil {
802 n++
803 }
804
805 for i := int32(0); i < n; i++ {
806 p := argv_index(argv, argc+1+i)
807 s := unsafe.String(p, findnull(p))
808
809 if stringslite.HasPrefix(s, prefix) {
810 env = gostring(p)[len(prefix):]
811 break
812 }
813 }
814 }
815 return env
816 }
817
818
819
820
821
822
823
824
825
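// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.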
826 func schedinit() {
827 lockInit(&sched.lock, lockRankSched)
828 lockInit(&sched.sysmonlock, lockRankSysmon)
829 lockInit(&sched.deferlock, lockRankDefer)
830 lockInit(&sched.sudoglock, lockRankSudog)
831 lockInit(&deadlock, lockRankDeadlock)
832 lockInit(&paniclk, lockRankPanic)
833 lockInit(&allglock, lockRankAllg)
834 lockInit(&allpLock, lockRankAllp)
835 lockInit(&reflectOffs.lock, lockRankReflectOffs)
836 lockInit(&finlock, lockRankFin)
837 lockInit(&cpuprof.lock, lockRankCpuprof)
838 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
839 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
840 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
841 traceLockInit()
842
843
844
845 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
846
847 lockVerifyMSize()
848
849
850
851 gp := getg()
852 if raceenabled {
853 gp.racectx, raceprocctx0 = raceinit()
854 }
855
856 sched.maxmcount = 10000
857 crashFD.Store(^uintptr(0))
858
859
860 worldStopped()
861
862 ticks.init()
863 moduledataverify()
864 stackinit()
865 randinit()
866 mallocinit()
867 godebug := getGodebugEarly()
868 cpuinit(godebug)
869 alginit()
870 mcommoninit(gp.m, -1)
871 modulesinit()
872 typelinksinit()
873 itabsinit()
874 stkobjinit()
875
876 sigsave(&gp.m.sigmask)
877 initSigmask = gp.m.sigmask
878
879 goargs()
880 goenvs()
881 secure()
882 checkfds()
883 parsedebugvars()
884 gcinit()
885
886
887
888 gcrash.stack = stackalloc(16384)
889 gcrash.stackguard0 = gcrash.stack.lo + 1000
890 gcrash.stackguard1 = gcrash.stack.lo + 1000
891
892
893
894
895
896 if disableMemoryProfiling {
897 MemProfileRate = 0
898 }
899
900
901 mProfStackInit(gp.m)
902 defaultGOMAXPROCSInit()
903
904 lock(&sched.lock)
905 sched.lastpoll.Store(nanotime())
906 var procs int32
907 if n, ok := strconv.Atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
908 procs = n
909 sched.customGOMAXPROCS = true
910 } else {
911
912
913
914
915
916
917
918
919 procs = defaultGOMAXPROCS(numCPUStartup)
920 }
921 if procresize(procs) != nil {
922 throw("unknown runnable goroutine during bootstrap")
923 }
924 unlock(&sched.lock)
925
926
927 worldStarted()
928
929 if buildVersion == "" {
930
931
932 buildVersion = "unknown"
933 }
934 if len(modinfo) == 1 {
935
936
937 modinfo = ""
938 }
939 }
940
941 func dumpgstatus(gp *g) {
942 thisg := getg()
943 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
944 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
945 }
946
947
948 func checkmcount() {
949 assertLockHeld(&sched.lock)
950
951
952
953
954
955
956
957
958
959 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
960 if count > sched.maxmcount {
961 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
962 throw("thread exhaustion")
963 }
964 }
965
966
967
968
969
970 func mReserveID() int64 {
971 assertLockHeld(&sched.lock)
972
973 if sched.mnext+1 < sched.mnext {
974 throw("runtime: thread ID overflow")
975 }
976 id := sched.mnext
977 sched.mnext++
978 checkmcount()
979 return id
980 }
981
982
983 func mcommoninit(mp *m, id int64) {
984 gp := getg()
985
986
987 if gp != gp.m.g0 {
988 callers(1, mp.createstack[:])
989 }
990
991 lock(&sched.lock)
992
993 if id >= 0 {
994 mp.id = id
995 } else {
996 mp.id = mReserveID()
997 }
998
999 mrandinit(mp)
1000
1001 mpreinit(mp)
1002 if mp.gsignal != nil {
1003 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1004 }
1005
1006
1007
1008 mp.alllink = allm
1009
1010
1011
1012 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1013 unlock(&sched.lock)
1014
1015
1016 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1017 mp.cgoCallers = new(cgoCallers)
1018 }
1019 mProfStackInit(mp)
1020 }
1021
1022
1023
1024
1025
1026 func mProfStackInit(mp *m) {
1027 if debug.profstackdepth == 0 {
1028
1029
1030 return
1031 }
1032 mp.profStack = makeProfStackFP()
1033 mp.mLockProfile.stack = makeProfStackFP()
1034 }
1035
1036
1037
1038
1039 func makeProfStackFP() []uintptr {
1040
1041
1042
1043
1044
1045
1046 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1047 }
1048
1049
1050
1051 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1052
1053
1054 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1055
1056 func (mp *m) becomeSpinning() {
1057 mp.spinning = true
1058 sched.nmspinning.Add(1)
1059 sched.needspinning.Store(0)
1060 }
1061
1062
1063
1064
1065
1066
1067
1068
1069 func (mp *m) snapshotAllp() []*p {
1070 mp.allpSnapshot = allp
1071 return mp.allpSnapshot
1072 }
1073
1074
1075
1076
1077
1078
1079
1080 func (mp *m) clearAllpSnapshot() {
1081 mp.allpSnapshot = nil
1082 }
1083
1084 func (mp *m) hasCgoOnStack() bool {
1085 return mp.ncgo > 0 || mp.isextra
1086 }
1087
1088 const (
1089
1090
1091 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1092
1093
1094
1095 osHasLowResClockInt = goos.IsWindows
1096
1097
1098
1099 osHasLowResClock = osHasLowResClockInt > 0
1100 )
1101
1102
1103 func ready(gp *g, traceskip int, next bool) {
1104 status := readgstatus(gp)
1105
1106
1107 mp := acquirem()
1108 if status&^_Gscan != _Gwaiting {
1109 dumpgstatus(gp)
1110 throw("bad g->status in ready")
1111 }
1112
1113
1114 trace := traceAcquire()
1115 casgstatus(gp, _Gwaiting, _Grunnable)
1116 if trace.ok() {
1117 trace.GoUnpark(gp, traceskip)
1118 traceRelease(trace)
1119 }
1120 runqput(mp.p.ptr(), gp, next)
1121 wakep()
1122 releasem(mp)
1123 }
1124
1125
1126
1127 const freezeStopWait = 0x7fffffff
1128
1129
1130
1131 var freezing atomic.Bool
1132
1133
1134
1135
1136 func freezetheworld() {
1137 freezing.Store(true)
1138 if debug.dontfreezetheworld > 0 {
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163 usleep(1000)
1164 return
1165 }
1166
1167
1168
1169
1170 for i := 0; i < 5; i++ {
1171
1172 sched.stopwait = freezeStopWait
1173 sched.gcwaiting.Store(true)
1174
1175 if !preemptall() {
1176 break
1177 }
1178 usleep(1000)
1179 }
1180
1181 usleep(1000)
1182 preemptall()
1183 usleep(1000)
1184 }
1185
1186
1187
1188
1189
1190 func readgstatus(gp *g) uint32 {
1191 return gp.atomicstatus.Load()
1192 }
1193
1194
1195
1196
1197
1198 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1199 success := false
1200
1201
1202 switch oldval {
1203 default:
1204 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1205 dumpgstatus(gp)
1206 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1207 case _Gscanrunnable,
1208 _Gscanwaiting,
1209 _Gscanrunning,
1210 _Gscansyscall,
1211 _Gscanpreempted:
1212 if newval == oldval&^_Gscan {
1213 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1214 }
1215 }
1216 if !success {
1217 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1218 dumpgstatus(gp)
1219 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1220 }
1221 releaseLockRankAndM(lockRankGscan)
1222 }
1223
1224
1225
1226 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1227 switch oldval {
1228 case _Grunnable,
1229 _Grunning,
1230 _Gwaiting,
1231 _Gsyscall:
1232 if newval == oldval|_Gscan {
1233 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1234 if r {
1235 acquireLockRankAndM(lockRankGscan)
1236 }
1237 return r
1238
1239 }
1240 }
1241 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1242 throw("castogscanstatus")
1243 panic("not reached")
1244 }
1245
1246
1247
1248 var casgstatusAlwaysTrack = false
1249
1250
1251
1252
1253
1254
1255
1256 func casgstatus(gp *g, oldval, newval uint32) {
1257 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1258 systemstack(func() {
1259
1260
1261 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1262 throw("casgstatus: bad incoming values")
1263 })
1264 }
1265
1266 lockWithRankMayAcquire(nil, lockRankGscan)
1267
1268
1269 const yieldDelay = 5 * 1000
1270 var nextYield int64
1271
1272
1273
1274 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1275 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1276 systemstack(func() {
1277
1278
1279 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1280 })
1281 }
1282 if i == 0 {
1283 nextYield = nanotime() + yieldDelay
1284 }
1285 if nanotime() < nextYield {
1286 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1287 procyield(1)
1288 }
1289 } else {
1290 osyield()
1291 nextYield = nanotime() + yieldDelay/2
1292 }
1293 }
1294
1295 if gp.bubble != nil {
1296 systemstack(func() {
1297 gp.bubble.changegstatus(gp, oldval, newval)
1298 })
1299 }
1300
1301 if oldval == _Grunning {
1302
1303 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1304 gp.tracking = true
1305 }
1306 gp.trackingSeq++
1307 }
1308 if !gp.tracking {
1309 return
1310 }
1311
1312
1313
1314
1315
1316
1317 switch oldval {
1318 case _Grunnable:
1319
1320
1321
1322 now := nanotime()
1323 gp.runnableTime += now - gp.trackingStamp
1324 gp.trackingStamp = 0
1325 case _Gwaiting:
1326 if !gp.waitreason.isMutexWait() {
1327
1328 break
1329 }
1330
1331
1332
1333
1334
1335 now := nanotime()
1336 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1337 gp.trackingStamp = 0
1338 }
1339 switch newval {
1340 case _Gwaiting:
1341 if !gp.waitreason.isMutexWait() {
1342
1343 break
1344 }
1345
1346 now := nanotime()
1347 gp.trackingStamp = now
1348 case _Grunnable:
1349
1350
1351 now := nanotime()
1352 gp.trackingStamp = now
1353 case _Grunning:
1354
1355
1356
1357 gp.tracking = false
1358 sched.timeToRun.record(gp.runnableTime)
1359 gp.runnableTime = 0
1360 }
1361 }
1362
1363
1364
1365
1366 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1367
1368 gp.waitreason = reason
1369 casgstatus(gp, old, _Gwaiting)
1370 }
1371
1372
1373
1374
1375
1376
1377
1378
1379 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1380 if !reason.isWaitingForSuspendG() {
1381 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1382 }
1383 casGToWaiting(gp, old, reason)
1384 }
1385
1386
1387
1388
1389
1390 func casGToPreemptScan(gp *g, old, new uint32) {
1391 if old != _Grunning || new != _Gscan|_Gpreempted {
1392 throw("bad g transition")
1393 }
1394 acquireLockRankAndM(lockRankGscan)
1395 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1396 }
1397
1398
1399
1400
1401
1402
1403 }
1404
1405
1406
1407
1408 func casGFromPreempted(gp *g, old, new uint32) bool {
1409 if old != _Gpreempted || new != _Gwaiting {
1410 throw("bad g transition")
1411 }
1412 gp.waitreason = waitReasonPreempted
1413 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1414 return false
1415 }
1416 if bubble := gp.bubble; bubble != nil {
1417 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1418 }
1419 return true
1420 }
1421
1422
1423 type stwReason uint8
1424
1425
1426
1427
1428 const (
1429 stwUnknown stwReason = iota
1430 stwGCMarkTerm
1431 stwGCSweepTerm
1432 stwWriteHeapDump
1433 stwGoroutineProfile
1434 stwGoroutineProfileCleanup
1435 stwAllGoroutinesStack
1436 stwReadMemStats
1437 stwAllThreadsSyscall
1438 stwGOMAXPROCS
1439 stwStartTrace
1440 stwStopTrace
1441 stwForTestCountPagesInUse
1442 stwForTestReadMetricsSlow
1443 stwForTestReadMemStatsSlow
1444 stwForTestPageCachePagesLeaked
1445 stwForTestResetDebugLog
1446 )
1447
1448 func (r stwReason) String() string {
1449 return stwReasonStrings[r]
1450 }
1451
1452 func (r stwReason) isGC() bool {
1453 return r == stwGCMarkTerm || r == stwGCSweepTerm
1454 }
1455
1456
1457
1458
1459 var stwReasonStrings = [...]string{
1460 stwUnknown: "unknown",
1461 stwGCMarkTerm: "GC mark termination",
1462 stwGCSweepTerm: "GC sweep termination",
1463 stwWriteHeapDump: "write heap dump",
1464 stwGoroutineProfile: "goroutine profile",
1465 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1466 stwAllGoroutinesStack: "all goroutines stack trace",
1467 stwReadMemStats: "read mem stats",
1468 stwAllThreadsSyscall: "AllThreadsSyscall",
1469 stwGOMAXPROCS: "GOMAXPROCS",
1470 stwStartTrace: "start trace",
1471 stwStopTrace: "stop trace",
1472 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1473 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1474 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1475 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1476 stwForTestResetDebugLog: "ResetDebugLog (test)",
1477 }
1478
1479
1480
1481 type worldStop struct {
1482 reason stwReason
1483 startedStopping int64
1484 finishedStopping int64
1485 stoppingCPUTime int64
1486 }
1487
1488
1489
1490
1491 var stopTheWorldContext worldStop
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
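// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points and recording reason as the reason for the stop.
// On return, only the current goroutine's P is running. The returned worldStop
// must be passed to startTheWorld to resume execution.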
1510 func stopTheWorld(reason stwReason) worldStop {
1511 semacquire(&worldsema)
1512 gp := getg()
1513 gp.m.preemptoff = reason.String()
1514 systemstack(func() {
1515 stopTheWorldContext = stopTheWorldWithSema(reason)
1516 })
1517 return stopTheWorldContext
1518 }
1519
1520
1521
1522
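// startTheWorld undoes the effects of stopTheWorld.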
1523 func startTheWorld(w worldStop) {
1524 systemstack(func() { startTheWorldWithSema(0, w) })
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541 mp := acquirem()
1542 mp.preemptoff = ""
1543 semrelease1(&worldsema, true, 0)
1544 releasem(mp)
1545 }
1546
1547
1548
1549
1550 func stopTheWorldGC(reason stwReason) worldStop {
1551 semacquire(&gcsema)
1552 return stopTheWorld(reason)
1553 }
1554
1555
1556
1557
1558 func startTheWorldGC(w worldStop) {
1559 startTheWorld(w)
1560 semrelease(&gcsema)
1561 }
1562
1563
1564 var worldsema uint32 = 1
1565
1566
1567
1568
1569
1570
1571
1572 var gcsema uint32 = 1
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
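// stopTheWorldWithSema is the core implementation of stopTheWorld. The caller
// is responsible for acquiring worldsema and disabling preemption first, and
// must run this on the system stack.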
1606 func stopTheWorldWithSema(reason stwReason) worldStop {
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1620
1621 trace := traceAcquire()
1622 if trace.ok() {
1623 trace.STWStart(reason)
1624 traceRelease(trace)
1625 }
1626 gp := getg()
1627
1628
1629
1630 if gp.m.locks > 0 {
1631 throw("stopTheWorld: holding locks")
1632 }
1633
1634 lock(&sched.lock)
1635 start := nanotime()
1636 sched.stopwait = gomaxprocs
1637 sched.gcwaiting.Store(true)
1638 preemptall()
1639
1640 gp.m.p.ptr().status = _Pgcstop
1641 gp.m.p.ptr().gcStopTime = start
1642 sched.stopwait--
1643
1644 trace = traceAcquire()
1645 for _, pp := range allp {
1646 s := pp.status
1647 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1648 if trace.ok() {
1649 trace.ProcSteal(pp, false)
1650 }
1651 sched.nGsyscallNoP.Add(1)
1652 pp.syscalltick++
1653 pp.gcStopTime = nanotime()
1654 sched.stopwait--
1655 }
1656 }
1657 if trace.ok() {
1658 traceRelease(trace)
1659 }
1660
1661
1662 now := nanotime()
1663 for {
1664 pp, _ := pidleget(now)
1665 if pp == nil {
1666 break
1667 }
1668 pp.status = _Pgcstop
1669 pp.gcStopTime = nanotime()
1670 sched.stopwait--
1671 }
1672 wait := sched.stopwait > 0
1673 unlock(&sched.lock)
1674
1675
1676 if wait {
1677 for {
1678
1679 if notetsleep(&sched.stopnote, 100*1000) {
1680 noteclear(&sched.stopnote)
1681 break
1682 }
1683 preemptall()
1684 }
1685 }
1686
1687 finish := nanotime()
1688 startTime := finish - start
1689 if reason.isGC() {
1690 sched.stwStoppingTimeGC.record(startTime)
1691 } else {
1692 sched.stwStoppingTimeOther.record(startTime)
1693 }
1694
1695
1696
1697
1698
1699 stoppingCPUTime := int64(0)
1700 bad := ""
1701 if sched.stopwait != 0 {
1702 bad = "stopTheWorld: not stopped (stopwait != 0)"
1703 } else {
1704 for _, pp := range allp {
1705 if pp.status != _Pgcstop {
1706 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1707 }
1708 if pp.gcStopTime == 0 && bad == "" {
1709 bad = "stopTheWorld: broken CPU time accounting"
1710 }
1711 stoppingCPUTime += finish - pp.gcStopTime
1712 pp.gcStopTime = 0
1713 }
1714 }
1715 if freezing.Load() {
1716
1717
1718
1719
1720 lock(&deadlock)
1721 lock(&deadlock)
1722 }
1723 if bad != "" {
1724 throw(bad)
1725 }
1726
1727 worldStopped()
1728
1729
1730 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1731
1732 return worldStop{
1733 reason: reason,
1734 startedStopping: start,
1735 finishedStopping: finish,
1736 stoppingCPUTime: stoppingCPUTime,
1737 }
1738 }
1739
1740
1741
1742
1743
1744
1745
1746 func startTheWorldWithSema(now int64, w worldStop) int64 {
1747 assertWorldStopped()
1748
1749 mp := acquirem()
1750 if netpollinited() {
1751 list, delta := netpoll(0)
1752 injectglist(&list)
1753 netpollAdjustWaiters(delta)
1754 }
1755 lock(&sched.lock)
1756
1757 procs := gomaxprocs
1758 if newprocs != 0 {
1759 procs = newprocs
1760 newprocs = 0
1761 }
1762 p1 := procresize(procs)
1763 sched.gcwaiting.Store(false)
1764 if sched.sysmonwait.Load() {
1765 sched.sysmonwait.Store(false)
1766 notewakeup(&sched.sysmonnote)
1767 }
1768 unlock(&sched.lock)
1769
1770 worldStarted()
1771
1772 for p1 != nil {
1773 p := p1
1774 p1 = p1.link.ptr()
1775 if p.m != 0 {
1776 mp := p.m.ptr()
1777 p.m = 0
1778 if mp.nextp != 0 {
1779 throw("startTheWorld: inconsistent mp->nextp")
1780 }
1781 mp.nextp.set(p)
1782 notewakeup(&mp.park)
1783 } else {
1784
1785 newm(nil, p, -1)
1786 }
1787 }
1788
1789
1790 if now == 0 {
1791 now = nanotime()
1792 }
1793 totalTime := now - w.startedStopping
1794 if w.reason.isGC() {
1795 sched.stwTotalTimeGC.record(totalTime)
1796 } else {
1797 sched.stwTotalTimeOther.record(totalTime)
1798 }
1799 trace := traceAcquire()
1800 if trace.ok() {
1801 trace.STWDone()
1802 traceRelease(trace)
1803 }
1804
1805
1806
1807
1808 wakep()
1809
1810 releasem(mp)
1811
1812 return now
1813 }
1814
1815
1816
1817 func usesLibcall() bool {
1818 switch GOOS {
1819 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1820 return true
1821 }
1822 return false
1823 }
1824
1825
1826
1827 func mStackIsSystemAllocated() bool {
1828 switch GOOS {
1829 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1830 return true
1831 }
1832 return false
1833 }
1834
1835
1836
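// mstart is the entry point for new Ms. It is implemented in assembly and
// transfers control to mstart0.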
1837 func mstart()
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
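// mstart0 is the Go entry point for new Ms. It sets up g0's stack bounds
// (guessing them for OS-allocated stacks) and calls mstart1; if mstart1
// returns, the thread exits via mexit.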
1848 func mstart0() {
1849 gp := getg()
1850
1851 osStack := gp.stack.lo == 0
1852 if osStack {
1853
1854
1855
1856
1857
1858
1859
1860
1861 size := gp.stack.hi
1862 if size == 0 {
1863 size = 16384 * sys.StackGuardMultiplier
1864 }
1865 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1866 gp.stack.lo = gp.stack.hi - size + 1024
1867 }
1868
1869
1870 gp.stackguard0 = gp.stack.lo + stackGuard
1871
1872
1873 gp.stackguard1 = gp.stackguard0
1874 mstart1()
1875
1876
1877 if mStackIsSystemAllocated() {
1878
1879
1880
1881 osStack = true
1882 }
1883 mexit(osStack)
1884 }
1885
1886
1887
1888
1889
1890 func mstart1() {
1891 gp := getg()
1892
1893 if gp != gp.m.g0 {
1894 throw("bad runtime·mstart")
1895 }
1896
1897
1898
1899
1900
1901
1902
1903 gp.sched.g = guintptr(unsafe.Pointer(gp))
1904 gp.sched.pc = sys.GetCallerPC()
1905 gp.sched.sp = sys.GetCallerSP()
1906
1907 asminit()
1908 minit()
1909
1910
1911
1912 if gp.m == &m0 {
1913 mstartm0()
1914 }
1915
1916 if debug.dataindependenttiming == 1 {
1917 sys.EnableDIT()
1918 }
1919
1920 if fn := gp.m.mstartfn; fn != nil {
1921 fn()
1922 }
1923
1924 if gp.m != &m0 {
1925 acquirep(gp.m.nextp.ptr())
1926 gp.m.nextp = 0
1927 }
1928 schedule()
1929 }
1930
1931
1932
1933
1934
1935
1936
1937 func mstartm0() {
1938
1939
1940
1941 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1942 cgoHasExtraM = true
1943 newextram()
1944 }
1945 initsig(false)
1946 }
1947
1948
1949
1950
1951 func mPark() {
1952 gp := getg()
1953 notesleep(&gp.m.park)
1954 noteclear(&gp.m.park)
1955 }
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
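// mexit tears down and exits the current thread. osStack reports whether the
// thread's g0 stack is owned by the OS and therefore must not be freed by the
// runtime.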
1967 func mexit(osStack bool) {
1968 mp := getg().m
1969
1970 if mp == &m0 {
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982 handoffp(releasep())
1983 lock(&sched.lock)
1984 sched.nmfreed++
1985 checkdead()
1986 unlock(&sched.lock)
1987 mPark()
1988 throw("locked m0 woke up")
1989 }
1990
1991 sigblock(true)
1992 unminit()
1993
1994
1995 if mp.gsignal != nil {
1996 stackfree(mp.gsignal.stack)
1997 if valgrindenabled {
1998 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
1999 mp.gsignal.valgrindStackID = 0
2000 }
2001
2002
2003
2004
2005 mp.gsignal = nil
2006 }
2007
2008
2009 vgetrandomDestroy(mp)
2010
2011
2012 lock(&sched.lock)
2013 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2014 if *pprev == mp {
2015 *pprev = mp.alllink
2016 goto found
2017 }
2018 }
2019 throw("m not found in allm")
2020 found:
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035 mp.freeWait.Store(freeMWait)
2036 mp.freelink = sched.freem
2037 sched.freem = mp
2038 unlock(&sched.lock)
2039
2040 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2041 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2042
2043
2044 handoffp(releasep())
2045
2046
2047
2048
2049
2050 lock(&sched.lock)
2051 sched.nmfreed++
2052 checkdead()
2053 unlock(&sched.lock)
2054
2055 if GOOS == "darwin" || GOOS == "ios" {
2056
2057
2058 if mp.signalPending.Load() != 0 {
2059 pendingPreemptSignals.Add(-1)
2060 }
2061 }
2062
2063
2064
2065 mdestroy(mp)
2066
2067 if osStack {
2068
2069 mp.freeWait.Store(freeMRef)
2070
2071
2072
2073 return
2074 }
2075
2076
2077
2078
2079
2080 exitThread(&mp.freeWait)
2081 }
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
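// forEachP calls fn(p) for every P when that P reaches a GC safe point. Ps
// running user code are preempted and brought to a safe point; for idle Ps and
// Ps in syscalls, fn is run on their behalf.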
2093 func forEachP(reason waitReason, fn func(*p)) {
2094 systemstack(func() {
2095 gp := getg().m.curg
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107 casGToWaitingForSuspendG(gp, _Grunning, reason)
2108 forEachPInternal(fn)
2109 casgstatus(gp, _Gwaiting, _Grunning)
2110 })
2111 }
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122 func forEachPInternal(fn func(*p)) {
2123 mp := acquirem()
2124 pp := getg().m.p.ptr()
2125
2126 lock(&sched.lock)
2127 if sched.safePointWait != 0 {
2128 throw("forEachP: sched.safePointWait != 0")
2129 }
2130 sched.safePointWait = gomaxprocs - 1
2131 sched.safePointFn = fn
2132
2133
2134 for _, p2 := range allp {
2135 if p2 != pp {
2136 atomic.Store(&p2.runSafePointFn, 1)
2137 }
2138 }
2139 preemptall()
2140
2141
2142
2143
2144
2145
2146
2147 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2148 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2149 fn(p)
2150 sched.safePointWait--
2151 }
2152 }
2153
2154 wait := sched.safePointWait > 0
2155 unlock(&sched.lock)
2156
2157
2158 fn(pp)
2159
2160
2161
2162 for _, p2 := range allp {
2163 s := p2.status
2164
2165
2166
2167 trace := traceAcquire()
2168 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2169 if trace.ok() {
2170
2171 trace.ProcSteal(p2, false)
2172 traceRelease(trace)
2173 }
2174 sched.nGsyscallNoP.Add(1)
2175 p2.syscalltick++
2176 handoffp(p2)
2177 } else if trace.ok() {
2178 traceRelease(trace)
2179 }
2180 }
2181
2182
2183 if wait {
2184 for {
2185
2186
2187
2188
2189 if notetsleep(&sched.safePointNote, 100*1000) {
2190 noteclear(&sched.safePointNote)
2191 break
2192 }
2193 preemptall()
2194 }
2195 }
2196 if sched.safePointWait != 0 {
2197 throw("forEachP: not done")
2198 }
2199 for _, p2 := range allp {
2200 if p2.runSafePointFn != 0 {
2201 throw("forEachP: P did not run fn")
2202 }
2203 }
2204
2205 lock(&sched.lock)
2206 sched.safePointFn = nil
2207 unlock(&sched.lock)
2208 releasem(mp)
2209 }
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222 func runSafePointFn() {
2223 p := getg().m.p.ptr()
2224
2225
2226
2227 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2228 return
2229 }
2230 sched.safePointFn(p)
2231 lock(&sched.lock)
2232 sched.safePointWait--
2233 if sched.safePointWait == 0 {
2234 notewakeup(&sched.safePointNote)
2235 }
2236 unlock(&sched.lock)
2237 }
2238
2239
2240
2241
2242 var cgoThreadStart unsafe.Pointer
2243
2244 type cgothreadstart struct {
2245 g guintptr
2246 tls *uint64
2247 fn unsafe.Pointer
2248 }
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
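// allocm allocates a new m unassociated with any thread. fn is recorded as the
// new m's mstartfn. id is an optional pre-allocated m ID; pass -1 to reserve
// one. pp may be used as the P for allocation context if the caller has none.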
2259 func allocm(pp *p, fn func(), id int64) *m {
2260 allocmLock.rlock()
2261
2262
2263
2264
2265 acquirem()
2266
2267 gp := getg()
2268 if gp.m.p == 0 {
2269 acquirep(pp)
2270 }
2271
2272
2273
2274 if sched.freem != nil {
2275 lock(&sched.lock)
2276 var newList *m
2277 for freem := sched.freem; freem != nil; {
2278
2279 wait := freem.freeWait.Load()
2280 if wait == freeMWait {
2281 next := freem.freelink
2282 freem.freelink = newList
2283 newList = freem
2284 freem = next
2285 continue
2286 }
2287
2288
2289
2290 if traceEnabled() || traceShuttingDown() {
2291 traceThreadDestroy(freem)
2292 }
2293
2294
2295
2296 if wait == freeMStack {
2297
2298
2299
2300 systemstack(func() {
2301 stackfree(freem.g0.stack)
2302 if valgrindenabled {
2303 valgrindDeregisterStack(freem.g0.valgrindStackID)
2304 freem.g0.valgrindStackID = 0
2305 }
2306 })
2307 }
2308 freem = freem.freelink
2309 }
2310 sched.freem = newList
2311 unlock(&sched.lock)
2312 }
2313
2314 mp := &new(mPadded).m
2315 mp.mstartfn = fn
2316 mcommoninit(mp, id)
2317
2318
2319
2320 if iscgo || mStackIsSystemAllocated() {
2321 mp.g0 = malg(-1)
2322 } else {
2323 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2324 }
2325 mp.g0.m = mp
2326
2327 if pp == gp.m.p.ptr() {
2328 releasep()
2329 }
2330
2331 releasem(gp.m)
2332 allocmLock.runlock()
2333 return mp
2334 }
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
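// needm is called when a cgo callback arrives on a thread that was not created
// by the Go runtime and therefore has no m. It borrows an m from the extra-m
// list and wires it to the current thread so Go code can run. signal indicates
// that the callback is happening in a signal handler.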
2375 func needm(signal bool) {
2376 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2377
2378
2379
2380
2381
2382
2383 writeErrStr("fatal error: cgo callback before cgo call\n")
2384 exit(1)
2385 }
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395 var sigmask sigset
2396 sigsave(&sigmask)
2397 sigblock(false)
2398
2399
2400
2401
2402 mp, last := getExtraM()
2403
2404
2405
2406
2407
2408
2409
2410
2411 mp.needextram = last
2412
2413
2414 mp.sigmask = sigmask
2415
2416
2417
2418 osSetupTLS(mp)
2419
2420
2421
2422 setg(mp.g0)
2423 sp := sys.GetCallerSP()
2424 callbackUpdateSystemStack(mp, sp, signal)
2425
2426
2427
2428
2429 mp.isExtraInC = false
2430
2431
2432 asminit()
2433 minit()
2434
2435
2436
2437
2438
2439
2440 var trace traceLocker
2441 if !signal {
2442 trace = traceAcquire()
2443 }
2444
2445
2446 casgstatus(mp.curg, _Gdead, _Gsyscall)
2447 sched.ngsys.Add(-1)
2448 sched.nGsyscallNoP.Add(1)
2449
2450 if !signal {
2451 if trace.ok() {
2452 trace.GoCreateSyscall(mp.curg)
2453 traceRelease(trace)
2454 }
2455 }
2456 mp.isExtraInSig = signal
2457 }
2458
2459
2460
2461
2462 func needAndBindM() {
2463 needm(false)
2464
2465 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2466 cgoBindM()
2467 }
2468 }
2469
2470
2471
2472
2473 func newextram() {
2474 c := extraMWaiters.Swap(0)
2475 if c > 0 {
2476 for i := uint32(0); i < c; i++ {
2477 oneNewExtraM()
2478 }
2479 } else if extraMLength.Load() == 0 {
2480
2481 oneNewExtraM()
2482 }
2483 }
2484
2485
2486 func oneNewExtraM() {
2487
2488
2489
2490
2491
2492 mp := allocm(nil, nil, -1)
2493 gp := malg(4096)
2494 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2495 gp.sched.sp = gp.stack.hi
2496 gp.sched.sp -= 4 * goarch.PtrSize
2497 gp.sched.lr = 0
2498 gp.sched.g = guintptr(unsafe.Pointer(gp))
2499 gp.syscallpc = gp.sched.pc
2500 gp.syscallsp = gp.sched.sp
2501 gp.stktopsp = gp.sched.sp
2502
2503
2504
2505
2506 casgstatus(gp, _Gidle, _Gdead)
2507 gp.m = mp
2508 mp.curg = gp
2509 mp.isextra = true
2510
2511 mp.isExtraInC = true
2512 mp.lockedInt++
2513 mp.lockedg.set(gp)
2514 gp.lockedm.set(mp)
2515 gp.goid = sched.goidgen.Add(1)
2516 if raceenabled {
2517 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2518 }
2519
2520 allgadd(gp)
2521
2522
2523
2524
2525
2526 sched.ngsys.Add(1)
2527
2528
2529 addExtraM(mp)
2530 }
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
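// dropm undoes the effect of needm: once the cgo callback has finished, it
// disassociates the borrowed m from the current thread and returns it to the
// extra-m list.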
2565 func dropm() {
2566
2567
2568
2569 mp := getg().m
2570
2571
2572
2573
2574
2575 var trace traceLocker
2576 if !mp.isExtraInSig {
2577 trace = traceAcquire()
2578 }
2579
2580
2581 casgstatus(mp.curg, _Gsyscall, _Gdead)
2582 mp.curg.preemptStop = false
2583 sched.ngsys.Add(1)
2584 sched.nGsyscallNoP.Add(-1)
2585
2586 if !mp.isExtraInSig {
2587 if trace.ok() {
2588 trace.GoDestroySyscall()
2589 traceRelease(trace)
2590 }
2591 }
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606 mp.syscalltick--
2607
2608
2609
2610 mp.curg.trace.reset()
2611
2612
2613
2614
2615 if traceEnabled() || traceShuttingDown() {
2616
2617
2618
2619
2620
2621
2622
2623 lock(&sched.lock)
2624 traceThreadDestroy(mp)
2625 unlock(&sched.lock)
2626 }
2627 mp.isExtraInSig = false
2628
2629
2630
2631
2632
2633 sigmask := mp.sigmask
2634 sigblock(false)
2635 unminit()
2636
2637 setg(nil)
2638
2639
2640
2641 g0 := mp.g0
2642 g0.stack.hi = 0
2643 g0.stack.lo = 0
2644 g0.stackguard0 = 0
2645 g0.stackguard1 = 0
2646 mp.g0StackAccurate = false
2647
2648 putExtraM(mp)
2649
2650 msigrestore(sigmask)
2651 }
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673 func cgoBindM() {
2674 if GOOS == "windows" || GOOS == "plan9" {
2675 fatal("bindm in unexpected GOOS")
2676 }
2677 g := getg()
2678 if g.m.g0 != g {
2679 fatal("the current g is not g0")
2680 }
2681 if _cgo_bindm != nil {
2682 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2683 }
2684 }
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697 func getm() uintptr {
2698 return uintptr(unsafe.Pointer(getg().m))
2699 }
2700
2701 var (
2702
2703
2704
2705
2706
2707
2708 extraM atomic.Uintptr
2709
2710 extraMLength atomic.Uint32
2711
2712 extraMWaiters atomic.Uint32
2713
2714
2715 extraMInUse atomic.Uint32
2716 )
2717
2718
2719
2720
2721
2722
2723
2724
2725 func lockextra(nilokay bool) *m {
2726 const locked = 1
2727
2728 incr := false
2729 for {
2730 old := extraM.Load()
2731 if old == locked {
2732 osyield_no_g()
2733 continue
2734 }
2735 if old == 0 && !nilokay {
2736 if !incr {
2737
2738
2739
2740 extraMWaiters.Add(1)
2741 incr = true
2742 }
2743 usleep_no_g(1)
2744 continue
2745 }
2746 if extraM.CompareAndSwap(old, locked) {
2747 return (*m)(unsafe.Pointer(old))
2748 }
2749 osyield_no_g()
2750 continue
2751 }
2752 }
2753
2754
2755 func unlockextra(mp *m, delta int32) {
2756 extraMLength.Add(delta)
2757 extraM.Store(uintptr(unsafe.Pointer(mp)))
2758 }
2759
2760
2761
2762
2763
2764
2765
2766
2767 func getExtraM() (mp *m, last bool) {
2768 mp = lockextra(false)
2769 extraMInUse.Add(1)
2770 unlockextra(mp.schedlink.ptr(), -1)
2771 return mp, mp.schedlink.ptr() == nil
2772 }
2773
2774
2775
2776
2777
2778 func putExtraM(mp *m) {
2779 extraMInUse.Add(-1)
2780 addExtraM(mp)
2781 }
2782
2783
2784
2785
2786 func addExtraM(mp *m) {
2787 mnext := lockextra(true)
2788 mp.schedlink.set(mnext)
2789 unlockextra(mp, 1)
2790 }
2791
2792 var (
2793
2794
2795
2796 allocmLock rwmutex
2797
2798
2799
2800
2801 execLock rwmutex
2802 )
2803
2804
2805
2806 const (
2807 failthreadcreate = "runtime: failed to create new OS thread\n"
2808 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2809 )
2810
2811
2812
2813
2814 var newmHandoff struct {
2815 lock mutex
2816
2817
2818
2819 newm muintptr
2820
2821
2822
2823 waiting bool
2824 wake note
2825
2826
2827
2828
2829 haveTemplateThread uint32
2830 }
2831
2832
2833
2834
2835
2836
2837
2838
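// newm creates a new m that will start by running fn, or the scheduler if fn is
// nil. pp, if non-nil, becomes the new m's initial P. id is an optional
// pre-allocated m ID; pass -1 to reserve one.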
2839 func newm(fn func(), pp *p, id int64) {
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850 acquirem()
2851
2852 mp := allocm(pp, fn, id)
2853 mp.nextp.set(pp)
2854 mp.sigmask = initSigmask
2855 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867 lock(&newmHandoff.lock)
2868 if newmHandoff.haveTemplateThread == 0 {
2869 throw("on a locked thread with no template thread")
2870 }
2871 mp.schedlink = newmHandoff.newm
2872 newmHandoff.newm.set(mp)
2873 if newmHandoff.waiting {
2874 newmHandoff.waiting = false
2875 notewakeup(&newmHandoff.wake)
2876 }
2877 unlock(&newmHandoff.lock)
2878
2879
2880
2881 releasem(getg().m)
2882 return
2883 }
2884 newm1(mp)
2885 releasem(getg().m)
2886 }
2887
2888 func newm1(mp *m) {
2889 if iscgo {
2890 var ts cgothreadstart
2891 if _cgo_thread_start == nil {
2892 throw("_cgo_thread_start missing")
2893 }
2894 ts.g.set(mp.g0)
2895 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2896 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2897 if msanenabled {
2898 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2899 }
2900 if asanenabled {
2901 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2902 }
2903 execLock.rlock()
2904 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2905 execLock.runlock()
2906 return
2907 }
2908 execLock.rlock()
2909 newosproc(mp)
2910 execLock.runlock()
2911 }
2912
2913
2914
2915
2916
2917 func startTemplateThread() {
2918 if GOARCH == "wasm" {
2919 return
2920 }
2921
2922
2923
2924 mp := acquirem()
2925 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2926 releasem(mp)
2927 return
2928 }
2929 newm(templateThread, nil, -1)
2930 releasem(mp)
2931 }
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945 func templateThread() {
2946 lock(&sched.lock)
2947 sched.nmsys++
2948 checkdead()
2949 unlock(&sched.lock)
2950
2951 for {
2952 lock(&newmHandoff.lock)
2953 for newmHandoff.newm != 0 {
2954 newm := newmHandoff.newm.ptr()
2955 newmHandoff.newm = 0
2956 unlock(&newmHandoff.lock)
2957 for newm != nil {
2958 next := newm.schedlink.ptr()
2959 newm.schedlink = 0
2960 newm1(newm)
2961 newm = next
2962 }
2963 lock(&newmHandoff.lock)
2964 }
2965 newmHandoff.waiting = true
2966 noteclear(&newmHandoff.wake)
2967 unlock(&newmHandoff.lock)
2968 notesleep(&newmHandoff.wake)
2969 }
2970 }
2971
2972
2973
2974 func stopm() {
2975 gp := getg()
2976
2977 if gp.m.locks != 0 {
2978 throw("stopm holding locks")
2979 }
2980 if gp.m.p != 0 {
2981 throw("stopm holding p")
2982 }
2983 if gp.m.spinning {
2984 throw("stopm spinning")
2985 }
2986
2987 lock(&sched.lock)
2988 mput(gp.m)
2989 unlock(&sched.lock)
2990 mPark()
2991 acquirep(gp.m.nextp.ptr())
2992 gp.m.nextp = 0
2993 }
2994
2995 func mspinning() {
2996
2997 getg().m.spinning = true
2998 }
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
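// startm schedules some m to run p, creating an m if necessary. If pp is nil,
// it tries to get an idle P and does nothing if there is none. spinning
// indicates the caller has incremented nmspinning; lockheld reports whether
// sched.lock is already held.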
3017 func startm(pp *p, spinning, lockheld bool) {
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034 mp := acquirem()
3035 if !lockheld {
3036 lock(&sched.lock)
3037 }
3038 if pp == nil {
3039 if spinning {
3040
3041
3042
3043 throw("startm: P required for spinning=true")
3044 }
3045 pp, _ = pidleget(0)
3046 if pp == nil {
3047 if !lockheld {
3048 unlock(&sched.lock)
3049 }
3050 releasem(mp)
3051 return
3052 }
3053 }
3054 nmp := mget()
3055 if nmp == nil {
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070 id := mReserveID()
3071 unlock(&sched.lock)
3072
3073 var fn func()
3074 if spinning {
3075
3076 fn = mspinning
3077 }
3078 newm(fn, pp, id)
3079
3080 if lockheld {
3081 lock(&sched.lock)
3082 }
3083
3084
3085 releasem(mp)
3086 return
3087 }
3088 if !lockheld {
3089 unlock(&sched.lock)
3090 }
3091 if nmp.spinning {
3092 throw("startm: m is spinning")
3093 }
3094 if nmp.nextp != 0 {
3095 throw("startm: m has p")
3096 }
3097 if spinning && !runqempty(pp) {
3098 throw("startm: p has runnable gs")
3099 }
3100
3101 nmp.spinning = spinning
3102 nmp.nextp.set(pp)
3103 notewakeup(&nmp.park)
3104
3105
3106 releasem(mp)
3107 }
3108
3109
3110
3111
3112
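// handoffp hands off P from a syscall or locked m. It always runs without a P,
// so write barriers are not allowed.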
3113 func handoffp(pp *p) {
3114
3115
3116
3117
3118 if !runqempty(pp) || !sched.runq.empty() {
3119 startm(pp, false, false)
3120 return
3121 }
3122
3123 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3124 startm(pp, false, false)
3125 return
3126 }
3127
3128 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3129 startm(pp, false, false)
3130 return
3131 }
3132
3133
3134 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3135 sched.needspinning.Store(0)
3136 startm(pp, true, false)
3137 return
3138 }
3139 lock(&sched.lock)
3140 if sched.gcwaiting.Load() {
3141 pp.status = _Pgcstop
3142 pp.gcStopTime = nanotime()
3143 sched.stopwait--
3144 if sched.stopwait == 0 {
3145 notewakeup(&sched.stopnote)
3146 }
3147 unlock(&sched.lock)
3148 return
3149 }
3150 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3151 sched.safePointFn(pp)
3152 sched.safePointWait--
3153 if sched.safePointWait == 0 {
3154 notewakeup(&sched.safePointNote)
3155 }
3156 }
3157 if !sched.runq.empty() {
3158 unlock(&sched.lock)
3159 startm(pp, false, false)
3160 return
3161 }
3162
3163
3164 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3165 unlock(&sched.lock)
3166 startm(pp, false, false)
3167 return
3168 }
3169
3170
3171
3172 when := pp.timers.wakeTime()
3173 pidleput(pp, 0)
3174 unlock(&sched.lock)
3175
3176 if when != 0 {
3177 wakeNetPoller(when)
3178 }
3179 }
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
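// wakep tries to add one more P to execute Gs. It is called when a G is made
// runnable (newproc, ready).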
3194 func wakep() {
3195
3196
3197 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3198 return
3199 }
3200
3201
3202
3203
3204
3205
3206 mp := acquirem()
3207
3208 var pp *p
3209 lock(&sched.lock)
3210 pp, _ = pidlegetSpinning(0)
3211 if pp == nil {
3212 if sched.nmspinning.Add(-1) < 0 {
3213 throw("wakep: negative nmspinning")
3214 }
3215 unlock(&sched.lock)
3216 releasem(mp)
3217 return
3218 }
3219
3220
3221
3222
3223 unlock(&sched.lock)
3224
3225 startm(pp, true, false)
3226
3227 releasem(mp)
3228 }
3229
3230
3231
3232 func stoplockedm() {
3233 gp := getg()
3234
3235 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3236 throw("stoplockedm: inconsistent locking")
3237 }
3238 if gp.m.p != 0 {
3239
3240 pp := releasep()
3241 handoffp(pp)
3242 }
3243 incidlelocked(1)
3244
3245 mPark()
3246 status := readgstatus(gp.m.lockedg.ptr())
3247 if status&^_Gscan != _Grunnable {
3248 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3249 dumpgstatus(gp.m.lockedg.ptr())
3250 throw("stoplockedm: not runnable")
3251 }
3252 acquirep(gp.m.nextp.ptr())
3253 gp.m.nextp = 0
3254 }
3255
3256
3257
3258
3259
3260 func startlockedm(gp *g) {
3261 mp := gp.lockedm.ptr()
3262 if mp == getg().m {
3263 throw("startlockedm: locked to me")
3264 }
3265 if mp.nextp != 0 {
3266 throw("startlockedm: m has p")
3267 }
3268
3269 incidlelocked(-1)
3270 pp := releasep()
3271 mp.nextp.set(pp)
3272 notewakeup(&mp.park)
3273 stopm()
3274 }
3275
3276
3277
3278 func gcstopm() {
3279 gp := getg()
3280
3281 if !sched.gcwaiting.Load() {
3282 throw("gcstopm: not waiting for gc")
3283 }
3284 if gp.m.spinning {
3285 gp.m.spinning = false
3286
3287
3288 if sched.nmspinning.Add(-1) < 0 {
3289 throw("gcstopm: negative nmspinning")
3290 }
3291 }
3292 pp := releasep()
3293 lock(&sched.lock)
3294 pp.status = _Pgcstop
3295 pp.gcStopTime = nanotime()
3296 sched.stopwait--
3297 if sched.stopwait == 0 {
3298 notewakeup(&sched.stopnote)
3299 }
3300 unlock(&sched.lock)
3301 stopm()
3302 }
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
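// execute schedules gp to run on the current m. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts a
// new slice. Never returns.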
3313 func execute(gp *g, inheritTime bool) {
3314 mp := getg().m
3315
3316 if goroutineProfile.active {
3317
3318
3319
3320 tryRecordGoroutineProfile(gp, nil, osyield)
3321 }
3322
3323
3324 mp.curg = gp
3325 gp.m = mp
3326 gp.syncSafePoint = false
3327 casgstatus(gp, _Grunnable, _Grunning)
3328 gp.waitsince = 0
3329 gp.preempt = false
3330 gp.stackguard0 = gp.stack.lo + stackGuard
3331 if !inheritTime {
3332 mp.p.ptr().schedtick++
3333 }
3334
3335
3336 hz := sched.profilehz
3337 if mp.profilehz != hz {
3338 setThreadCPUProfiler(hz)
3339 }
3340
3341 trace := traceAcquire()
3342 if trace.ok() {
3343 trace.GoStart()
3344 traceRelease(trace)
3345 }
3346
3347 gogo(&gp.sched)
3348 }
3349
3350
3351
3352
3353
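// findRunnable finds a runnable goroutine to execute, blocking until one is
// available. It tries the local and global run queues, the network poller, and
// stealing from other Ps. tryWakeP indicates the returned goroutine is special
// (GC worker or trace reader) and the caller should consider waking a P.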
3354 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3355 mp := getg().m
3356
3357
3358
3359
3360
3361 top:
3362
3363
3364
3365 mp.clearAllpSnapshot()
3366
3367 pp := mp.p.ptr()
3368 if sched.gcwaiting.Load() {
3369 gcstopm()
3370 goto top
3371 }
3372 if pp.runSafePointFn != 0 {
3373 runSafePointFn()
3374 }
3375
3376
3377
3378
3379
3380 now, pollUntil, _ := pp.timers.check(0, nil)
3381
3382
3383 if traceEnabled() || traceShuttingDown() {
3384 gp := traceReader()
3385 if gp != nil {
3386 trace := traceAcquire()
3387 casgstatus(gp, _Gwaiting, _Grunnable)
3388 if trace.ok() {
3389 trace.GoUnpark(gp, 0)
3390 traceRelease(trace)
3391 }
3392 return gp, false, true
3393 }
3394 }
3395
3396
3397 if gcBlackenEnabled != 0 {
3398 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3399 if gp != nil {
3400 return gp, false, true
3401 }
3402 now = tnow
3403 }
3404
3405
3406
3407
3408 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3409 lock(&sched.lock)
3410 gp := globrunqget()
3411 unlock(&sched.lock)
3412 if gp != nil {
3413 return gp, false, false
3414 }
3415 }
3416
3417
3418 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3419 if gp := wakefing(); gp != nil {
3420 ready(gp, 0, true)
3421 }
3422 }
3423
3424
3425 if gcCleanups.needsWake() {
3426 gcCleanups.wake()
3427 }
3428
3429 if *cgo_yield != nil {
3430 asmcgocall(*cgo_yield, nil)
3431 }
3432
3433
3434 if gp, inheritTime := runqget(pp); gp != nil {
3435 return gp, inheritTime, false
3436 }
3437
3438
3439 if !sched.runq.empty() {
3440 lock(&sched.lock)
3441 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3442 unlock(&sched.lock)
3443 if gp != nil {
3444 if runqputbatch(pp, &q); !q.empty() {
3445 throw("Couldn't put Gs into empty local runq")
3446 }
3447 return gp, false, false
3448 }
3449 }
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3461 list, delta := netpoll(0)
3462 sched.pollingNet.Store(0)
3463 if !list.empty() {
3464 gp := list.pop()
3465 injectglist(&list)
3466 netpollAdjustWaiters(delta)
3467 trace := traceAcquire()
3468 casgstatus(gp, _Gwaiting, _Grunnable)
3469 if trace.ok() {
3470 trace.GoUnpark(gp, 0)
3471 traceRelease(trace)
3472 }
3473 return gp, false, false
3474 }
3475 }
3476
3477
3478
3479
3480
3481
3482 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3483 if !mp.spinning {
3484 mp.becomeSpinning()
3485 }
3486
3487 gp, inheritTime, tnow, w, newWork := stealWork(now)
3488 if gp != nil {
3489
3490 return gp, inheritTime, false
3491 }
3492 if newWork {
3493
3494
3495 goto top
3496 }
3497
3498 now = tnow
3499 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3500
3501 pollUntil = w
3502 }
3503 }
3504
3505
3506
3507
3508
3509 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3510 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3511 if node != nil {
3512 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3513 gp := node.gp.ptr()
3514
3515 trace := traceAcquire()
3516 casgstatus(gp, _Gwaiting, _Grunnable)
3517 if trace.ok() {
3518 trace.GoUnpark(gp, 0)
3519 traceRelease(trace)
3520 }
3521 return gp, false, false
3522 }
3523 gcController.removeIdleMarkWorker()
3524 }
3525
3526
3527
3528
3529
3530 gp, otherReady := beforeIdle(now, pollUntil)
3531 if gp != nil {
3532 trace := traceAcquire()
3533 casgstatus(gp, _Gwaiting, _Grunnable)
3534 if trace.ok() {
3535 trace.GoUnpark(gp, 0)
3536 traceRelease(trace)
3537 }
3538 return gp, false, false
3539 }
3540 if otherReady {
3541 goto top
3542 }
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552 allpSnapshot := mp.snapshotAllp()
3553
3554
3555 idlepMaskSnapshot := idlepMask
3556 timerpMaskSnapshot := timerpMask
3557
3558
3559 lock(&sched.lock)
3560 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3561 unlock(&sched.lock)
3562 goto top
3563 }
3564 if !sched.runq.empty() {
3565 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3566 unlock(&sched.lock)
3567 if gp == nil {
3568 throw("global runq empty with non-zero runqsize")
3569 }
3570 if runqputbatch(pp, &q); !q.empty() {
3571 throw("Couldn't put Gs into empty local runq")
3572 }
3573 return gp, false, false
3574 }
3575 if !mp.spinning && sched.needspinning.Load() == 1 {
3576
3577 mp.becomeSpinning()
3578 unlock(&sched.lock)
3579 goto top
3580 }
3581 if releasep() != pp {
3582 throw("findrunnable: wrong p")
3583 }
3584 now = pidleput(pp, now)
3585 unlock(&sched.lock)
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623 wasSpinning := mp.spinning
3624 if mp.spinning {
3625 mp.spinning = false
3626 if sched.nmspinning.Add(-1) < 0 {
3627 throw("findrunnable: negative nmspinning")
3628 }
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641 lock(&sched.lock)
3642 if !sched.runq.empty() {
3643 pp, _ := pidlegetSpinning(0)
3644 if pp != nil {
3645 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3646 unlock(&sched.lock)
3647 if gp == nil {
3648 throw("global runq empty with non-zero runqsize")
3649 }
3650 if runqputbatch(pp, &q); !q.empty() {
3651 throw("Couldn't put Gs into empty local runq")
3652 }
3653 acquirep(pp)
3654 mp.becomeSpinning()
3655 return gp, false, false
3656 }
3657 }
3658 unlock(&sched.lock)
3659
3660 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3661 if pp != nil {
3662 acquirep(pp)
3663 mp.becomeSpinning()
3664 goto top
3665 }
3666
3667
3668 pp, gp := checkIdleGCNoP()
3669 if pp != nil {
3670 acquirep(pp)
3671 mp.becomeSpinning()
3672
3673
3674 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3675 trace := traceAcquire()
3676 casgstatus(gp, _Gwaiting, _Grunnable)
3677 if trace.ok() {
3678 trace.GoUnpark(gp, 0)
3679 traceRelease(trace)
3680 }
3681 return gp, false, false
3682 }
3683
3684
3685
3686
3687
3688
3689
3690 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3691 }
3692
3693
3694
3695
3696 // Poll network until next timer.
3697 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3698 sched.pollUntil.Store(pollUntil)
3699 if mp.p != 0 {
3700 throw("findrunnable: netpoll with p")
3701 }
3702 if mp.spinning {
3703 throw("findrunnable: netpoll with spinning")
3704 }
3705 delay := int64(-1)
3706 if pollUntil != 0 {
3707 if now == 0 {
3708 now = nanotime()
3709 }
3710 delay = pollUntil - now
3711 if delay < 0 {
3712 delay = 0
3713 }
3714 }
3715 if faketime != 0 {
3716
3717 delay = 0
3718 }
3719 list, delta := netpoll(delay)
3720
3721 now = nanotime()
3722 sched.pollUntil.Store(0)
3723 sched.lastpoll.Store(now)
3724 if faketime != 0 && list.empty() {
3725
3726
3727 stopm()
3728 goto top
3729 }
3730 lock(&sched.lock)
3731 pp, _ := pidleget(now)
3732 unlock(&sched.lock)
3733 if pp == nil {
3734 injectglist(&list)
3735 netpollAdjustWaiters(delta)
3736 } else {
3737 acquirep(pp)
3738 if !list.empty() {
3739 gp := list.pop()
3740 injectglist(&list)
3741 netpollAdjustWaiters(delta)
3742 trace := traceAcquire()
3743 casgstatus(gp, _Gwaiting, _Grunnable)
3744 if trace.ok() {
3745 trace.GoUnpark(gp, 0)
3746 traceRelease(trace)
3747 }
3748 return gp, false, false
3749 }
3750 if wasSpinning {
3751 mp.becomeSpinning()
3752 }
3753 goto top
3754 }
3755 } else if pollUntil != 0 && netpollinited() {
3756 pollerPollUntil := sched.pollUntil.Load()
3757 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3758 netpollBreak()
3759 }
3760 }
3761 stopm()
3762 goto top
3763 }
3764
3765
3766
3767
3768
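// pollWork reports whether there is non-background work this P could
// be doing. This is a fairly lightweight check to be used for
// background work loops, like idle GC. It checks a subset of the
// conditions checked by the actual scheduler.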
3769 func pollWork() bool {
3770 if !sched.runq.empty() {
3771 return true
3772 }
3773 p := getg().m.p.ptr()
3774 if !runqempty(p) {
3775 return true
3776 }
3777 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3778 if list, delta := netpoll(0); !list.empty() {
3779 injectglist(&list)
3780 netpollAdjustWaiters(delta)
3781 return true
3782 }
3783 }
3784 return false
3785 }
3786
3787
3788
3789
3790
3791
3792
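// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed time
// or the current time if now was passed as 0.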
3793 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3794 pp := getg().m.p.ptr()
3795
3796 ranTimer := false
3797
3798 const stealTries = 4
3799 for i := 0; i < stealTries; i++ {
3800 stealTimersOrRunNextG := i == stealTries-1
3801
3802 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3803 if sched.gcwaiting.Load() {
3804
3805 return nil, false, now, pollUntil, true
3806 }
3807 p2 := allp[enum.position()]
3808 if pp == p2 {
3809 continue
3810 }
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3826 tnow, w, ran := p2.timers.check(now, nil)
3827 now = tnow
3828 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3829 pollUntil = w
3830 }
3831 if ran {
3832
3833
3834
3835
3836
3837
3838
3839
3840 if gp, inheritTime := runqget(pp); gp != nil {
3841 return gp, inheritTime, now, pollUntil, ranTimer
3842 }
3843 ranTimer = true
3844 }
3845 }
3846
3847
3848 if !idlepMask.read(enum.position()) {
3849 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3850 return gp, false, now, pollUntil, ranTimer
3851 }
3852 }
3853 }
3854 }
3855
3856
3857
3858
3859 return nil, false, now, pollUntil, ranTimer
3860 }
3861
3862
3863
3864
3865
3866
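// Check all Ps for a runnable G to steal.
//
// On entry we have no P. If a G is available to steal and a P is available,
// the P is returned which the caller should acquire and attempt to steal the
// work to.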
3867 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3868 for id, p2 := range allpSnapshot {
3869 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3870 lock(&sched.lock)
3871 pp, _ := pidlegetSpinning(0)
3872 if pp == nil {
3873
3874 unlock(&sched.lock)
3875 return nil
3876 }
3877 unlock(&sched.lock)
3878 return pp
3879 }
3880 }
3881
3882
3883 return nil
3884 }
3885
3886
3887
3888
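// Check all Ps for a timer expiring sooner than pollUntil.
//
// Returns updated pollUntil value.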
3889 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3890 for id, p2 := range allpSnapshot {
3891 if timerpMaskSnapshot.read(uint32(id)) {
3892 w := p2.timers.wakeTime()
3893 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3894 pollUntil = w
3895 }
3896 }
3897 }
3898
3899 return pollUntil
3900 }
3901
3902
3903
3904
3905
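// Check for idle-priority GC work, without a P on entry.
//
// If some GC work, a P, and a worker G are all available, the P and G are
// returned. The returned P has not been wired yet, so the caller must
// acquire it and mark the worker as the idle GC mark worker.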
3906 func checkIdleGCNoP() (*p, *g) {
3907
3908
3909
3910
3911
3912
3913 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3914 return nil, nil
3915 }
3916 if !gcMarkWorkAvailable(nil) {
3917 return nil, nil
3918 }
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937 lock(&sched.lock)
3938 pp, now := pidlegetSpinning(0)
3939 if pp == nil {
3940 unlock(&sched.lock)
3941 return nil, nil
3942 }
3943
3944
3945 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3946 pidleput(pp, now)
3947 unlock(&sched.lock)
3948 return nil, nil
3949 }
3950
3951 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3952 if node == nil {
3953 pidleput(pp, now)
3954 unlock(&sched.lock)
3955 gcController.removeIdleMarkWorker()
3956 return nil, nil
3957 }
3958
3959 unlock(&sched.lock)
3960
3961 return pp, node.gp.ptr()
3962 }
3963
3964
3965
3966
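// wakeNetPoller wakes up the thread sleeping in the network poller if it
// isn't going to wake up before the when argument; or it wakes an idle P to
// service timers and the network poller if there isn't one already.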
3967 func wakeNetPoller(when int64) {
3968 if sched.lastpoll.Load() == 0 {
3969
3970
3971
3972
3973 pollerPollUntil := sched.pollUntil.Load()
3974 if pollerPollUntil == 0 || pollerPollUntil > when {
3975 netpollBreak()
3976 }
3977 } else {
3978
3979
3980 if GOOS != "plan9" {
3981 wakep()
3982 }
3983 }
3984 }
3985
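// resetspinning clears the spinning state of the current M. Because the
// count of spinning Ms just dropped, it conservatively wakes another P so
// that work readied concurrently is not left unserviced.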
3986 func resetspinning() {
3987 gp := getg()
3988 if !gp.m.spinning {
3989 throw("resetspinning: not a spinning m")
3990 }
3991 gp.m.spinning = false
3992 nmspinning := sched.nmspinning.Add(-1)
3993 if nmspinning < 0 {
3994 throw("findrunnable: negative nmspinning")
3995 }
3996
3997
3998
3999 wakep()
4000 }
4001
4002
4003
4004
4005
4006
4007
4008
4009
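// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to npidle M's are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.
// Can run concurrently with GC.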
4010 func injectglist(glist *gList) {
4011 if glist.empty() {
4012 return
4013 }
4014
4015
4016
4017 var tail *g
4018 trace := traceAcquire()
4019 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4020 tail = gp
4021 casgstatus(gp, _Gwaiting, _Grunnable)
4022 if trace.ok() {
4023 trace.GoUnpark(gp, 0)
4024 }
4025 }
4026 if trace.ok() {
4027 traceRelease(trace)
4028 }
4029
4030
4031 q := gQueue{glist.head, tail.guintptr(), glist.size}
4032 *glist = gList{}
4033
4034 startIdle := func(n int32) {
4035 for ; n > 0; n-- {
4036 mp := acquirem()
4037 lock(&sched.lock)
4038
4039 pp, _ := pidlegetSpinning(0)
4040 if pp == nil {
4041 unlock(&sched.lock)
4042 releasem(mp)
4043 break
4044 }
4045
4046 startm(pp, false, true)
4047 unlock(&sched.lock)
4048 releasem(mp)
4049 }
4050 }
4051
4052 pp := getg().m.p.ptr()
4053 if pp == nil {
4054 n := q.size
4055 lock(&sched.lock)
4056 globrunqputbatch(&q)
4057 unlock(&sched.lock)
4058 startIdle(n)
4059 return
4060 }
4061
4062 var globq gQueue
4063 npidle := sched.npidle.Load()
4064 for ; npidle > 0 && !q.empty(); npidle-- {
4065 g := q.pop()
4066 globq.pushBack(g)
4067 }
4068 if !globq.empty() {
4069 n := globq.size
4070 lock(&sched.lock)
4071 globrunqputbatch(&globq)
4072 unlock(&sched.lock)
4073 startIdle(n)
4074 }
4075
4076 if runqputbatch(pp, &q); !q.empty() {
4077 lock(&sched.lock)
4078 globrunqputbatch(&q)
4079 unlock(&sched.lock)
4080 }
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095 wakep()
4096 }
4097
4098
4099
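// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.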
4100 func schedule() {
4101 mp := getg().m
4102
4103 if mp.locks != 0 {
4104 throw("schedule: holding locks")
4105 }
4106
4107 if mp.lockedg != 0 {
4108 stoplockedm()
4109 execute(mp.lockedg.ptr(), false)
4110 }
4111
4112
4113
4114 if mp.incgo {
4115 throw("schedule: in cgo")
4116 }
4117
4118 top:
4119 pp := mp.p.ptr()
4120 pp.preempt = false
4121
4122
4123
4124
4125 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4126 throw("schedule: spinning with local work")
4127 }
4128
4129 gp, inheritTime, tryWakeP := findRunnable()
4130
4131
4132
4133
4134 mp.clearAllpSnapshot()
4135
4136 if debug.dontfreezetheworld > 0 && freezing.Load() {
4137
4138
4139
4140
4141
4142
4143
4144 lock(&deadlock)
4145 lock(&deadlock)
4146 }
4147
4148
4149
4150
4151 if mp.spinning {
4152 resetspinning()
4153 }
4154
4155 if sched.disable.user && !schedEnabled(gp) {
4156
4157
4158
4159 lock(&sched.lock)
4160 if schedEnabled(gp) {
4161
4162
4163 unlock(&sched.lock)
4164 } else {
4165 sched.disable.runnable.pushBack(gp)
4166 unlock(&sched.lock)
4167 goto top
4168 }
4169 }
4170
4171
4172
4173 if tryWakeP {
4174 wakep()
4175 }
4176 if gp.lockedm != 0 {
4177
4178
4179 startlockedm(gp)
4180 goto top
4181 }
4182
4183 execute(gp, inheritTime)
4184 }
4185
4186
4187
4188
4189
4190
4191
4192
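// dropg removes the association between m and the current goroutine m->curg
// (gp for short). Typically a caller sets gp's status away from Grunning and
// then immediately calls dropg to finish the job. The caller is also
// responsible for arranging that gp will be restarted using ready at an
// appropriate time. After calling dropg and arranging for gp to be readied
// later, the caller can do other work but eventually should call schedule to
// restart the scheduling of goroutines on this m.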
4193 func dropg() {
4194 gp := getg()
4195
4196 setMNoWB(&gp.m.curg.m, nil)
4197 setGNoWB(&gp.m.curg, nil)
4198 }
4199
4200 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4201 unlock((*mutex)(lock))
4202 return true
4203 }
4204
4205
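// park continuation on g0.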
4206 func park_m(gp *g) {
4207 mp := getg().m
4208
4209 trace := traceAcquire()
4210
4211
4212
4213
4214
4215 bubble := gp.bubble
4216 if bubble != nil {
4217 bubble.incActive()
4218 }
4219
4220 if trace.ok() {
4221
4222
4223
4224 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4225 }
4226
4227
4228 casgstatus(gp, _Grunning, _Gwaiting)
4229 if trace.ok() {
4230 traceRelease(trace)
4231 }
4232
4233 dropg()
4234
4235 if fn := mp.waitunlockf; fn != nil {
4236 ok := fn(gp, mp.waitlock)
4237 mp.waitunlockf = nil
4238 mp.waitlock = nil
4239 if !ok {
4240 trace := traceAcquire()
4241 casgstatus(gp, _Gwaiting, _Grunnable)
4242 if bubble != nil {
4243 bubble.decActive()
4244 }
4245 if trace.ok() {
4246 trace.GoUnpark(gp, 2)
4247 traceRelease(trace)
4248 }
4249 execute(gp, true)
4250 }
4251 }
4252
4253 if bubble != nil {
4254 bubble.decActive()
4255 }
4256
4257 schedule()
4258 }
4259
4260 func goschedImpl(gp *g, preempted bool) {
4261 trace := traceAcquire()
4262 status := readgstatus(gp)
4263 if status&^_Gscan != _Grunning {
4264 dumpgstatus(gp)
4265 throw("bad g status")
4266 }
4267 if trace.ok() {
4268
4269
4270
4271 if preempted {
4272 trace.GoPreempt()
4273 } else {
4274 trace.GoSched()
4275 }
4276 }
4277 casgstatus(gp, _Grunning, _Grunnable)
4278 if trace.ok() {
4279 traceRelease(trace)
4280 }
4281
4282 dropg()
4283 lock(&sched.lock)
4284 globrunqput(gp)
4285 unlock(&sched.lock)
4286
4287 if mainStarted {
4288 wakep()
4289 }
4290
4291 schedule()
4292 }
4293
4294
4295 func gosched_m(gp *g) {
4296 goschedImpl(gp, false)
4297 }
4298
4299
4300 func goschedguarded_m(gp *g) {
4301 if !canPreemptM(gp.m) {
4302 gogo(&gp.sched)
4303 }
4304 goschedImpl(gp, false)
4305 }
4306
4307 func gopreempt_m(gp *g) {
4308 goschedImpl(gp, true)
4309 }
4310
4311
4312
4313
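// preemptPark parks gp and puts it in _Gpreempted.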
4314 func preemptPark(gp *g) {
4315 status := readgstatus(gp)
4316 if status&^_Gscan != _Grunning {
4317 dumpgstatus(gp)
4318 throw("bad g status")
4319 }
4320
4321 if gp.asyncSafePoint {
4322
4323
4324
4325 f := findfunc(gp.sched.pc)
4326 if !f.valid() {
4327 throw("preempt at unknown pc")
4328 }
4329 if f.flag&abi.FuncFlagSPWrite != 0 {
4330 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4331 throw("preempt SPWRITE")
4332 }
4333 }
4334
4335
4336
4337
4338
4339
4340
4341 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4342 dropg()
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359 trace := traceAcquire()
4360 if trace.ok() {
4361 trace.GoPark(traceBlockPreempted, 0)
4362 }
4363 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4364 if trace.ok() {
4365 traceRelease(trace)
4366 }
4367 schedule()
4368 }
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
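// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq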
4384 func goyield() {
4385 checkTimeouts()
4386 mcall(goyield_m)
4387 }
4388
4389 func goyield_m(gp *g) {
4390 trace := traceAcquire()
4391 pp := gp.m.p.ptr()
4392 if trace.ok() {
4393
4394
4395
4396 trace.GoPreempt()
4397 }
4398 casgstatus(gp, _Grunning, _Grunnable)
4399 if trace.ok() {
4400 traceRelease(trace)
4401 }
4402 dropg()
4403 runqput(pp, gp, false)
4404 schedule()
4405 }
4406
4407
4408 func goexit1() {
4409 if raceenabled {
4410 if gp := getg(); gp.bubble != nil {
4411 racereleasemergeg(gp, gp.bubble.raceaddr())
4412 }
4413 racegoend()
4414 }
4415 trace := traceAcquire()
4416 if trace.ok() {
4417 trace.GoEnd()
4418 traceRelease(trace)
4419 }
4420 mcall(goexit0)
4421 }
4422
4423
4424 func goexit0(gp *g) {
4425 gdestroy(gp)
4426 schedule()
4427 }
4428
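// gdestroy finishes tearing down a goroutine that has exited: it moves gp to
// _Gdead, clears its scheduler state, flushes any remaining GC assist credit,
// and puts gp on the free list of the current P.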
4429 func gdestroy(gp *g) {
4430 mp := getg().m
4431 pp := mp.p.ptr()
4432
4433 casgstatus(gp, _Grunning, _Gdead)
4434 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4435 if isSystemGoroutine(gp, false) {
4436 sched.ngsys.Add(-1)
4437 }
4438 gp.m = nil
4439 locked := gp.lockedm != 0
4440 gp.lockedm = 0
4441 mp.lockedg = 0
4442 gp.preemptStop = false
4443 gp.paniconfault = false
4444 gp._defer = nil
4445 gp._panic = nil
4446 gp.writebuf = nil
4447 gp.waitreason = waitReasonZero
4448 gp.param = nil
4449 gp.labels = nil
4450 gp.timer = nil
4451 gp.bubble = nil
4452
4453 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4454
4455
4456
4457 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4458 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4459 gcController.bgScanCredit.Add(scanCredit)
4460 gp.gcAssistBytes = 0
4461 }
4462
4463 dropg()
4464
4465 if GOARCH == "wasm" {
4466 gfput(pp, gp)
4467 return
4468 }
4469
4470 if locked && mp.lockedInt != 0 {
4471 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4472 if mp.isextra {
4473 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4474 }
4475 throw("exited a goroutine internally locked to the OS thread")
4476 }
4477 gfput(pp, gp)
4478 if locked {
4479
4480
4481
4482
4483
4484
4485 if GOOS != "plan9" {
4486 gogo(&mp.g0.sched)
4487 } else {
4488
4489
4490 mp.lockedExt = 0
4491 }
4492 }
4493 }
4494
4495
4496
4497
4498
4499
4500
4501
4502
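// save updates getg().sched to refer to pc, sp, and bp so that a following
// gogo will restore pc/sp.
//
// save must not have write barriers because invoking a write barrier
// can clobber getg().sched.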
4503 func save(pc, sp, bp uintptr) {
4504 gp := getg()
4505
4506 if gp == gp.m.g0 || gp == gp.m.gsignal {
4507
4508
4509
4510
4511
4512 throw("save on system g not allowed")
4513 }
4514
4515 gp.sched.pc = pc
4516 gp.sched.sp = sp
4517 gp.sched.lr = 0
4518 gp.sched.bp = bp
4519
4520
4521
4522 if gp.sched.ctxt != nil {
4523 badctxt()
4524 }
4525 }
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551 func reentersyscall(pc, sp, bp uintptr) {
4552 trace := traceAcquire()
4553 gp := getg()
4554
4555
4556
4557 gp.m.locks++
4558
4559
4560
4561
4562
4563 gp.stackguard0 = stackPreempt
4564 gp.throwsplit = true
4565
4566
4567 save(pc, sp, bp)
4568 gp.syscallsp = sp
4569 gp.syscallpc = pc
4570 gp.syscallbp = bp
4571 casgstatus(gp, _Grunning, _Gsyscall)
4572 if staticLockRanking {
4573
4574
4575 save(pc, sp, bp)
4576 }
4577 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4578 systemstack(func() {
4579 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4580 throw("entersyscall")
4581 })
4582 }
4583 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4584 systemstack(func() {
4585 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4586 throw("entersyscall")
4587 })
4588 }
4589
4590 if trace.ok() {
4591 systemstack(func() {
4592 trace.GoSysCall()
4593 traceRelease(trace)
4594 })
4595
4596
4597
4598 save(pc, sp, bp)
4599 }
4600
4601 if sched.sysmonwait.Load() {
4602 systemstack(entersyscall_sysmon)
4603 save(pc, sp, bp)
4604 }
4605
4606 if gp.m.p.ptr().runSafePointFn != 0 {
4607
4608 systemstack(runSafePointFn)
4609 save(pc, sp, bp)
4610 }
4611
4612 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4613 pp := gp.m.p.ptr()
4614 pp.m = 0
4615 gp.m.oldp.set(pp)
4616 gp.m.p = 0
4617 atomic.Store(&pp.status, _Psyscall)
4618 if sched.gcwaiting.Load() {
4619 systemstack(entersyscall_gcwait)
4620 save(pc, sp, bp)
4621 }
4622
4623 gp.m.locks--
4624 }
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
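// Standard syscall entry used by the go syscall library and normal cgo calls.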
4640 func entersyscall() {
4641
4642
4643
4644
4645 fp := getcallerfp()
4646 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4647 }
4648
4649 func entersyscall_sysmon() {
4650 lock(&sched.lock)
4651 if sched.sysmonwait.Load() {
4652 sched.sysmonwait.Store(false)
4653 notewakeup(&sched.sysmonnote)
4654 }
4655 unlock(&sched.lock)
4656 }
4657
4658 func entersyscall_gcwait() {
4659 gp := getg()
4660 pp := gp.m.oldp.ptr()
4661
4662 lock(&sched.lock)
4663 trace := traceAcquire()
4664 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4665 if trace.ok() {
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675 trace.ProcSteal(pp, true)
4676 traceRelease(trace)
4677 }
4678 sched.nGsyscallNoP.Add(1)
4679 pp.gcStopTime = nanotime()
4680 pp.syscalltick++
4681 if sched.stopwait--; sched.stopwait == 0 {
4682 notewakeup(&sched.stopnote)
4683 }
4684 } else if trace.ok() {
4685 traceRelease(trace)
4686 }
4687 unlock(&sched.lock)
4688 }
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
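// The same as entersyscall(), but with a hint that the syscall is blocking.
// The P is handed off to another M immediately instead of waiting for sysmon
// to retake it.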
4702 func entersyscallblock() {
4703 gp := getg()
4704
4705 gp.m.locks++
4706 gp.throwsplit = true
4707 gp.stackguard0 = stackPreempt
4708 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4709 gp.m.p.ptr().syscalltick++
4710
4711 sched.nGsyscallNoP.Add(1)
4712
4713
4714 pc := sys.GetCallerPC()
4715 sp := sys.GetCallerSP()
4716 bp := getcallerfp()
4717 save(pc, sp, bp)
4718 gp.syscallsp = gp.sched.sp
4719 gp.syscallpc = gp.sched.pc
4720 gp.syscallbp = gp.sched.bp
4721 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4722 sp1 := sp
4723 sp2 := gp.sched.sp
4724 sp3 := gp.syscallsp
4725 systemstack(func() {
4726 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4727 throw("entersyscallblock")
4728 })
4729 }
4730 casgstatus(gp, _Grunning, _Gsyscall)
4731 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4732 systemstack(func() {
4733 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4734 throw("entersyscallblock")
4735 })
4736 }
4737 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4738 systemstack(func() {
4739 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4740 throw("entersyscallblock")
4741 })
4742 }
4743
4744 systemstack(entersyscallblock_handoff)
4745
4746
4747 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4748
4749 gp.m.locks--
4750 }
4751
4752 func entersyscallblock_handoff() {
4753 trace := traceAcquire()
4754 if trace.ok() {
4755 trace.GoSysCall()
4756 traceRelease(trace)
4757 }
4758 handoffp(releasep())
4759 }
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780
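// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
//
// Write barriers are not allowed because our P may have been stolen.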
4781 func exitsyscall() {
4782 gp := getg()
4783
4784 gp.m.locks++
4785 if sys.GetCallerSP() > gp.syscallsp {
4786 throw("exitsyscall: syscall frame is no longer valid")
4787 }
4788
4789 gp.waitsince = 0
4790 oldp := gp.m.oldp.ptr()
4791 gp.m.oldp = 0
4792 if exitsyscallfast(oldp) {
4793
4794
4795 if goroutineProfile.active {
4796
4797
4798
4799 systemstack(func() {
4800 tryRecordGoroutineProfileWB(gp)
4801 })
4802 }
4803 trace := traceAcquire()
4804 if trace.ok() {
4805 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4806 systemstack(func() {
4807
4808
4809
4810
4811 trace.GoSysExit(lostP)
4812 if lostP {
4813
4814
4815
4816
4817 trace.GoStart()
4818 }
4819 })
4820 }
4821
4822 gp.m.p.ptr().syscalltick++
4823
4824 casgstatus(gp, _Gsyscall, _Grunning)
4825 if trace.ok() {
4826 traceRelease(trace)
4827 }
4828
4829
4830
4831 gp.syscallsp = 0
4832 gp.m.locks--
4833 if gp.preempt {
4834
4835 gp.stackguard0 = stackPreempt
4836 } else {
4837
4838 gp.stackguard0 = gp.stack.lo + stackGuard
4839 }
4840 gp.throwsplit = false
4841
4842 if sched.disable.user && !schedEnabled(gp) {
4843
4844 Gosched()
4845 }
4846
4847 return
4848 }
4849
4850 gp.m.locks--
4851
4852
4853 mcall(exitsyscall0)
4854
4855
4856
4857
4858
4859
4860
4861 gp.syscallsp = 0
4862 gp.m.p.ptr().syscalltick++
4863 gp.throwsplit = false
4864 }
4865
4866
4867 func exitsyscallfast(oldp *p) bool {
4868
4869 if sched.stopwait == freezeStopWait {
4870 return false
4871 }
4872
4873
4874 trace := traceAcquire()
4875 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4876
4877 wirep(oldp)
4878 exitsyscallfast_reacquired(trace)
4879 if trace.ok() {
4880 traceRelease(trace)
4881 }
4882 return true
4883 }
4884 if trace.ok() {
4885 traceRelease(trace)
4886 }
4887
4888
4889 if sched.pidle != 0 {
4890 var ok bool
4891 systemstack(func() {
4892 ok = exitsyscallfast_pidle()
4893 })
4894 if ok {
4895 return true
4896 }
4897 }
4898 return false
4899 }
4900
4901
4902
4903
4904
4905
4906 func exitsyscallfast_reacquired(trace traceLocker) {
4907 gp := getg()
4908 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4909 if trace.ok() {
4910
4911
4912
4913 systemstack(func() {
4914
4915
4916 trace.ProcSteal(gp.m.p.ptr(), true)
4917 trace.ProcStart()
4918 })
4919 }
4920 gp.m.p.ptr().syscalltick++
4921 }
4922 }
4923
4924 func exitsyscallfast_pidle() bool {
4925 lock(&sched.lock)
4926 pp, _ := pidleget(0)
4927 if pp != nil && sched.sysmonwait.Load() {
4928 sched.sysmonwait.Store(false)
4929 notewakeup(&sched.sysmonnote)
4930 }
4931 unlock(&sched.lock)
4932 if pp != nil {
4933 sched.nGsyscallNoP.Add(-1)
4934 acquirep(pp)
4935 return true
4936 }
4937 return false
4938 }
4939
4940
4941
4942
4943
4944
4945
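// exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
//
// Called via mcall, so gp is the calling g from this M.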
4946 func exitsyscall0(gp *g) {
4947 var trace traceLocker
4948 traceExitingSyscall()
4949 trace = traceAcquire()
4950 casgstatus(gp, _Gsyscall, _Grunnable)
4951 traceExitedSyscall()
4952 if trace.ok() {
4953
4954
4955
4956
4957 trace.GoSysExit(true)
4958 traceRelease(trace)
4959 }
4960 sched.nGsyscallNoP.Add(-1)
4961 dropg()
4962 lock(&sched.lock)
4963 var pp *p
4964 if schedEnabled(gp) {
4965 pp, _ = pidleget(0)
4966 }
4967 var locked bool
4968 if pp == nil {
4969 globrunqput(gp)
4970
4971
4972
4973
4974
4975
4976 locked = gp.lockedm != 0
4977 } else if sched.sysmonwait.Load() {
4978 sched.sysmonwait.Store(false)
4979 notewakeup(&sched.sysmonnote)
4980 }
4981 unlock(&sched.lock)
4982 if pp != nil {
4983 acquirep(pp)
4984 execute(gp, false)
4985 }
4986 if locked {
4987
4988
4989
4990
4991 stoplockedm()
4992 execute(gp, false)
4993 }
4994 stopm()
4995 schedule()
4996 }
4997
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010 func syscall_runtime_BeforeFork() {
5011 gp := getg().m.curg
5012
5013
5014
5015
5016 gp.m.locks++
5017 sigsave(&gp.m.sigmask)
5018 sigblock(false)
5019
5020
5021
5022
5023
5024 gp.stackguard0 = stackFork
5025 }
5026
5027
5028
5029
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039 func syscall_runtime_AfterFork() {
5040 gp := getg().m.curg
5041
5042
5043 gp.stackguard0 = gp.stack.lo + stackGuard
5044
5045 msigrestore(gp.m.sigmask)
5046
5047 gp.m.locks--
5048 }
5049
5050
5051
5052 var inForkedChild bool
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073 func syscall_runtime_AfterForkInChild() {
5074
5075
5076
5077
5078 inForkedChild = true
5079
5080 clearSignalHandlers()
5081
5082
5083
5084 msigrestore(getg().m.sigmask)
5085
5086 inForkedChild = false
5087 }
5088
5089
5090
5091
5092 var pendingPreemptSignals atomic.Int32
5093
5094
5095
5096
5097 func syscall_runtime_BeforeExec() {
5098
5099 execLock.lock()
5100
5101
5102
5103 if GOOS == "darwin" || GOOS == "ios" {
5104 for pendingPreemptSignals.Load() > 0 {
5105 osyield()
5106 }
5107 }
5108 }
5109
5110
5111
5112
5113 func syscall_runtime_AfterExec() {
5114 execLock.unlock()
5115 }
5116
5117
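// Allocate a new g, with a stack big enough for stacksize bytes.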
5118 func malg(stacksize int32) *g {
5119 newg := new(g)
5120 if stacksize >= 0 {
5121 stacksize = round2(stackSystem + stacksize)
5122 systemstack(func() {
5123 newg.stack = stackalloc(uint32(stacksize))
5124 if valgrindenabled {
5125 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5126 }
5127 })
5128 newg.stackguard0 = newg.stack.lo + stackGuard
5129 newg.stackguard1 = ^uintptr(0)
5130
5131
5132 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5133 }
5134 return newg
5135 }
5136
5137
5138
5139
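// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.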
5140 func newproc(fn *funcval) {
5141 gp := getg()
5142 pc := sys.GetCallerPC()
5143 systemstack(func() {
5144 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5145
5146 pp := getg().m.p.ptr()
5147 runqput(pp, newg, true)
5148
5149 if mainStarted {
5150 wakep()
5151 }
5152 })
5153 }
5154
5155
5156
5157
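// Create a new g in state _Grunnable (or _Gwaiting if parked is true),
// starting at fn. callerpc is the address of the go statement that created
// this. The caller is responsible for adding the new g to the scheduler. If
// parked is true, waitreason must be non-zero.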
5158 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5159 if fn == nil {
5160 fatal("go of nil func value")
5161 }
5162
5163 mp := acquirem()
5164 pp := mp.p.ptr()
5165 newg := gfget(pp)
5166 if newg == nil {
5167 newg = malg(stackMin)
5168 casgstatus(newg, _Gidle, _Gdead)
5169 allgadd(newg)
5170 }
5171 if newg.stack.hi == 0 {
5172 throw("newproc1: newg missing stack")
5173 }
5174
5175 if readgstatus(newg) != _Gdead {
5176 throw("newproc1: new g is not Gdead")
5177 }
5178
5179 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5180 totalSize = alignUp(totalSize, sys.StackAlign)
5181 sp := newg.stack.hi - totalSize
5182 if usesLR {
5183
5184 *(*uintptr)(unsafe.Pointer(sp)) = 0
5185 prepGoExitFrame(sp)
5186 }
5187 if GOARCH == "arm64" {
5188
5189 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5190 }
5191
5192 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5193 newg.sched.sp = sp
5194 newg.stktopsp = sp
5195 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5196 newg.sched.g = guintptr(unsafe.Pointer(newg))
5197 gostartcallfn(&newg.sched, fn)
5198 newg.parentGoid = callergp.goid
5199 newg.gopc = callerpc
5200 newg.ancestors = saveAncestors(callergp)
5201 newg.startpc = fn.fn
5202 newg.runningCleanups.Store(false)
5203 if isSystemGoroutine(newg, false) {
5204 sched.ngsys.Add(1)
5205 } else {
5206
5207 newg.bubble = callergp.bubble
5208 if mp.curg != nil {
5209 newg.labels = mp.curg.labels
5210 }
5211 if goroutineProfile.active {
5212
5213
5214
5215
5216
5217 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5218 }
5219 }
5220
5221 newg.trackingSeq = uint8(cheaprand())
5222 if newg.trackingSeq%gTrackingPeriod == 0 {
5223 newg.tracking = true
5224 }
5225 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5226
5227
5228 trace := traceAcquire()
5229 var status uint32 = _Grunnable
5230 if parked {
5231 status = _Gwaiting
5232 newg.waitreason = waitreason
5233 }
5234 if pp.goidcache == pp.goidcacheend {
5235
5236
5237
5238 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5239 pp.goidcache -= _GoidCacheBatch - 1
5240 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5241 }
5242 newg.goid = pp.goidcache
5243 casgstatus(newg, _Gdead, status)
5244 pp.goidcache++
5245 newg.trace.reset()
5246 if trace.ok() {
5247 trace.GoCreate(newg, newg.startpc, parked)
5248 traceRelease(trace)
5249 }
5250
5251
5252 if raceenabled {
5253 newg.racectx = racegostart(callerpc)
5254 newg.raceignore = 0
5255 if newg.labels != nil {
5256
5257
5258 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5259 }
5260 }
5261 pp.goroutinesCreated++
5262 releasem(mp)
5263
5264 return newg
5265 }
5266
5267
5268
5269
5270 func saveAncestors(callergp *g) *[]ancestorInfo {
5271
5272 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5273 return nil
5274 }
5275 var callerAncestors []ancestorInfo
5276 if callergp.ancestors != nil {
5277 callerAncestors = *callergp.ancestors
5278 }
5279 n := int32(len(callerAncestors)) + 1
5280 if n > debug.tracebackancestors {
5281 n = debug.tracebackancestors
5282 }
5283 ancestors := make([]ancestorInfo, n)
5284 copy(ancestors[1:], callerAncestors)
5285
5286 var pcs [tracebackInnerFrames]uintptr
5287 npcs := gcallers(callergp, 0, pcs[:])
5288 ipcs := make([]uintptr, npcs)
5289 copy(ipcs, pcs[:])
5290 ancestors[0] = ancestorInfo{
5291 pcs: ipcs,
5292 goid: callergp.goid,
5293 gopc: callergp.gopc,
5294 }
5295
5296 ancestorsp := new([]ancestorInfo)
5297 *ancestorsp = ancestors
5298 return ancestorsp
5299 }
5300
5301
5302
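// Put on gfree list.
// If local list is too long, transfer a batch to the global list.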
5303 func gfput(pp *p, gp *g) {
5304 if readgstatus(gp) != _Gdead {
5305 throw("gfput: bad status (not Gdead)")
5306 }
5307
5308 stksize := gp.stack.hi - gp.stack.lo
5309
5310 if stksize != uintptr(startingStackSize) {
5311
5312 stackfree(gp.stack)
5313 gp.stack.lo = 0
5314 gp.stack.hi = 0
5315 gp.stackguard0 = 0
5316 if valgrindenabled {
5317 valgrindDeregisterStack(gp.valgrindStackID)
5318 gp.valgrindStackID = 0
5319 }
5320 }
5321
5322 pp.gFree.push(gp)
5323 if pp.gFree.size >= 64 {
5324 var (
5325 stackQ gQueue
5326 noStackQ gQueue
5327 )
5328 for pp.gFree.size >= 32 {
5329 gp := pp.gFree.pop()
5330 if gp.stack.lo == 0 {
5331 noStackQ.push(gp)
5332 } else {
5333 stackQ.push(gp)
5334 }
5335 }
5336 lock(&sched.gFree.lock)
5337 sched.gFree.noStack.pushAll(noStackQ)
5338 sched.gFree.stack.pushAll(stackQ)
5339 unlock(&sched.gFree.lock)
5340 }
5341 }
5342
5343
5344
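// Get from gfree list.
// If local list is empty, grab a batch from global list.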
5345 func gfget(pp *p) *g {
5346 retry:
5347 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5348 lock(&sched.gFree.lock)
5349
5350 for pp.gFree.size < 32 {
5351
5352 gp := sched.gFree.stack.pop()
5353 if gp == nil {
5354 gp = sched.gFree.noStack.pop()
5355 if gp == nil {
5356 break
5357 }
5358 }
5359 pp.gFree.push(gp)
5360 }
5361 unlock(&sched.gFree.lock)
5362 goto retry
5363 }
5364 gp := pp.gFree.pop()
5365 if gp == nil {
5366 return nil
5367 }
5368 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5369
5370
5371
5372 systemstack(func() {
5373 stackfree(gp.stack)
5374 gp.stack.lo = 0
5375 gp.stack.hi = 0
5376 gp.stackguard0 = 0
5377 if valgrindenabled {
5378 valgrindDeregisterStack(gp.valgrindStackID)
5379 gp.valgrindStackID = 0
5380 }
5381 })
5382 }
5383 if gp.stack.lo == 0 {
5384
5385 systemstack(func() {
5386 gp.stack = stackalloc(startingStackSize)
5387 if valgrindenabled {
5388 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5389 }
5390 })
5391 gp.stackguard0 = gp.stack.lo + stackGuard
5392 } else {
5393 if raceenabled {
5394 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5395 }
5396 if msanenabled {
5397 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5398 }
5399 if asanenabled {
5400 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5401 }
5402 }
5403 return gp
5404 }
5405
5406
5407 func gfpurge(pp *p) {
5408 var (
5409 stackQ gQueue
5410 noStackQ gQueue
5411 )
5412 for !pp.gFree.empty() {
5413 gp := pp.gFree.pop()
5414 if gp.stack.lo == 0 {
5415 noStackQ.push(gp)
5416 } else {
5417 stackQ.push(gp)
5418 }
5419 }
5420 lock(&sched.gFree.lock)
5421 sched.gFree.noStack.pushAll(noStackQ)
5422 sched.gFree.stack.pushAll(stackQ)
5423 unlock(&sched.gFree.lock)
5424 }
5425
5426
5427 func Breakpoint() {
5428 breakpoint()
5429 }
5430
5431
5432
5433
5434
5435
5436 func dolockOSThread() {
5437 if GOARCH == "wasm" {
5438 return
5439 }
5440 gp := getg()
5441 gp.m.lockedg.set(gp)
5442 gp.lockedm.set(gp.m)
5443 }
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461 func LockOSThread() {
5462 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5463
5464
5465
5466 startTemplateThread()
5467 }
5468 gp := getg()
5469 gp.m.lockedExt++
5470 if gp.m.lockedExt == 0 {
5471 gp.m.lockedExt--
5472 panic("LockOSThread nesting overflow")
5473 }
5474 dolockOSThread()
5475 }
5476
5477
5478 func lockOSThread() {
5479 getg().m.lockedInt++
5480 dolockOSThread()
5481 }
5482
5483
5484
5485
5486
5487
5488 func dounlockOSThread() {
5489 if GOARCH == "wasm" {
5490 return
5491 }
5492 gp := getg()
5493 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5494 return
5495 }
5496 gp.m.lockedg = 0
5497 gp.lockedm = 0
5498 }
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514 func UnlockOSThread() {
5515 gp := getg()
5516 if gp.m.lockedExt == 0 {
5517 return
5518 }
5519 gp.m.lockedExt--
5520 dounlockOSThread()
5521 }
5522
5523
5524 func unlockOSThread() {
5525 gp := getg()
5526 if gp.m.lockedInt == 0 {
5527 systemstack(badunlockosthread)
5528 }
5529 gp.m.lockedInt--
5530 dounlockOSThread()
5531 }
5532
5533 func badunlockosthread() {
5534 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5535 }
5536
5537 func gcount(includeSys bool) int32 {
5538 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5539 if !includeSys {
5540 n -= sched.ngsys.Load()
5541 }
5542 for _, pp := range allp {
5543 n -= pp.gFree.size
5544 }
5545
5546
5547
5548 if n < 1 {
5549 n = 1
5550 }
5551 return n
5552 }
5553
5554 func mcount() int32 {
5555 return int32(sched.mnext - sched.nmfreed)
5556 }
5557
5558 var prof struct {
5559 signalLock atomic.Uint32
5560
5561
5562
5563 hz atomic.Int32
5564 }
5565
5566 func _System() { _System() }
5567 func _ExternalCode() { _ExternalCode() }
5568 func _LostExternalCode() { _LostExternalCode() }
5569 func _GC() { _GC() }
5570 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5571 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5572 func _VDSO() { _VDSO() }
5573
5574
5575
5576
5577
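// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.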
5578 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5579 if prof.hz.Load() == 0 {
5580 return
5581 }
5582
5583
5584
5585
5586 if mp != nil && mp.profilehz == 0 {
5587 return
5588 }
5589
5590
5591
5592
5593
5594
5595
5596 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5597 if f := findfunc(pc); f.valid() {
5598 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5599 cpuprof.lostAtomic++
5600 return
5601 }
5602 }
5603 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5604
5605
5606
5607 cpuprof.lostAtomic++
5608 return
5609 }
5610 }
5611
5612
5613
5614
5615
5616
5617
5618 getg().m.mallocing++
5619
5620 var u unwinder
5621 var stk [maxCPUProfStack]uintptr
5622 n := 0
5623 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5624 cgoOff := 0
5625
5626
5627
5628
5629
5630 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5631 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5632 cgoOff++
5633 }
5634 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5635 mp.cgoCallers[0] = 0
5636 }
5637
5638
5639 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5640 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5641
5642
5643 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5644 } else if mp != nil && mp.vdsoSP != 0 {
5645
5646
5647 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5648 } else {
5649 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5650 }
5651 n += tracebackPCs(&u, 0, stk[n:])
5652
5653 if n <= 0 {
5654
5655
5656 n = 2
5657 if inVDSOPage(pc) {
5658 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5659 } else if pc > firstmoduledata.etext {
5660
5661 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5662 }
5663 stk[0] = pc
5664 if mp.preemptoff != "" {
5665 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5666 } else {
5667 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5668 }
5669 }
5670
5671 if prof.hz.Load() != 0 {
5672
5673
5674
5675 var tagPtr *unsafe.Pointer
5676 if gp != nil && gp.m != nil && gp.m.curg != nil {
5677 tagPtr = &gp.m.curg.labels
5678 }
5679 cpuprof.add(tagPtr, stk[:n])
5680
5681 gprof := gp
5682 var mp *m
5683 var pp *p
5684 if gp != nil && gp.m != nil {
5685 if gp.m.curg != nil {
5686 gprof = gp.m.curg
5687 }
5688 mp = gp.m
5689 pp = gp.m.p.ptr()
5690 }
5691 traceCPUSample(gprof, mp, pp, stk[:n])
5692 }
5693 getg().m.mallocing--
5694 }
5695
5696
5697
5698 func setcpuprofilerate(hz int32) {
5699
5700 if hz < 0 {
5701 hz = 0
5702 }
5703
5704
5705
5706 gp := getg()
5707 gp.m.locks++
5708
5709
5710
5711
5712 setThreadCPUProfiler(0)
5713
5714 for !prof.signalLock.CompareAndSwap(0, 1) {
5715 osyield()
5716 }
5717 if prof.hz.Load() != hz {
5718 setProcessCPUProfiler(hz)
5719 prof.hz.Store(hz)
5720 }
5721 prof.signalLock.Store(0)
5722
5723 lock(&sched.lock)
5724 sched.profilehz = hz
5725 unlock(&sched.lock)
5726
5727 if hz != 0 {
5728 setThreadCPUProfiler(hz)
5729 }
5730
5731 gp.m.locks--
5732 }
5733
5734
5735
5736 func (pp *p) init(id int32) {
5737 pp.id = id
5738 pp.status = _Pgcstop
5739 pp.sudogcache = pp.sudogbuf[:0]
5740 pp.deferpool = pp.deferpoolbuf[:0]
5741 pp.wbBuf.reset()
5742 if pp.mcache == nil {
5743 if id == 0 {
5744 if mcache0 == nil {
5745 throw("missing mcache?")
5746 }
5747
5748
5749 pp.mcache = mcache0
5750 } else {
5751 pp.mcache = allocmcache()
5752 }
5753 }
5754 if raceenabled && pp.raceprocctx == 0 {
5755 if id == 0 {
5756 pp.raceprocctx = raceprocctx0
5757 raceprocctx0 = 0
5758 } else {
5759 pp.raceprocctx = raceproccreate()
5760 }
5761 }
5762 lockInit(&pp.timers.mu, lockRankTimers)
5763
5764
5765
5766 timerpMask.set(id)
5767
5768
5769 idlepMask.clear(id)
5770 }
5771
5772
5773
5774
5775
5776 func (pp *p) destroy() {
5777 assertLockHeld(&sched.lock)
5778 assertWorldStopped()
5779
5780
5781 for pp.runqhead != pp.runqtail {
5782
5783 pp.runqtail--
5784 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5785
5786 globrunqputhead(gp)
5787 }
5788 if pp.runnext != 0 {
5789 globrunqputhead(pp.runnext.ptr())
5790 pp.runnext = 0
5791 }
5792
5793
5794 getg().m.p.ptr().timers.take(&pp.timers)
5795
5796
5797 if gcphase != _GCoff {
5798 wbBufFlush1(pp)
5799 pp.gcw.dispose()
5800 }
5801 clear(pp.sudogbuf[:])
5802 pp.sudogcache = pp.sudogbuf[:0]
5803 pp.pinnerCache = nil
5804 clear(pp.deferpoolbuf[:])
5805 pp.deferpool = pp.deferpoolbuf[:0]
5806 systemstack(func() {
5807 for i := 0; i < pp.mspancache.len; i++ {
5808
5809 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5810 }
5811 pp.mspancache.len = 0
5812 lock(&mheap_.lock)
5813 pp.pcache.flush(&mheap_.pages)
5814 unlock(&mheap_.lock)
5815 })
5816 freemcache(pp.mcache)
5817 pp.mcache = nil
5818 gfpurge(pp)
5819 if raceenabled {
5820 if pp.timers.raceCtx != 0 {
5821
5822
5823
5824
5825
5826 mp := getg().m
5827 phold := mp.p.ptr()
5828 mp.p.set(pp)
5829
5830 racectxend(pp.timers.raceCtx)
5831 pp.timers.raceCtx = 0
5832
5833 mp.p.set(phold)
5834 }
5835 raceprocdestroy(pp.raceprocctx)
5836 pp.raceprocctx = 0
5837 }
5838 pp.gcAssistTime = 0
5839 gcCleanups.queued += pp.cleanupsQueued
5840 pp.cleanupsQueued = 0
5841 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5842 pp.goroutinesCreated = 0
5843 pp.xRegs.free()
5844 pp.status = _Pdead
5845 }
5846
5847
5848
5849
5850
5851
5852
5853
5854
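// Change number of processors.
//
// sched.lock must be held, and the world must be stopped.
//
// gcworkbufs must not be being modified by either the GC or the write barrier
// code, so the GC must not be running if the number of Ps actually changes.
//
// Returns list of Ps with local work, they need to be scheduled by the caller.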
5855 func procresize(nprocs int32) *p {
5856 assertLockHeld(&sched.lock)
5857 assertWorldStopped()
5858
5859 old := gomaxprocs
5860 if old < 0 || nprocs <= 0 {
5861 throw("procresize: invalid arg")
5862 }
5863 trace := traceAcquire()
5864 if trace.ok() {
5865 trace.Gomaxprocs(nprocs)
5866 traceRelease(trace)
5867 }
5868
5869
5870 now := nanotime()
5871 if sched.procresizetime != 0 {
5872 sched.totaltime += int64(old) * (now - sched.procresizetime)
5873 }
5874 sched.procresizetime = now
5875
5876 maskWords := (nprocs + 31) / 32
5877
5878
5879 if nprocs > int32(len(allp)) {
5880
5881
5882 lock(&allpLock)
5883 if nprocs <= int32(cap(allp)) {
5884 allp = allp[:nprocs]
5885 } else {
5886 nallp := make([]*p, nprocs)
5887
5888
5889 copy(nallp, allp[:cap(allp)])
5890 allp = nallp
5891 }
5892
5893 if maskWords <= int32(cap(idlepMask)) {
5894 idlepMask = idlepMask[:maskWords]
5895 timerpMask = timerpMask[:maskWords]
5896 } else {
5897 nidlepMask := make([]uint32, maskWords)
5898
5899 copy(nidlepMask, idlepMask)
5900 idlepMask = nidlepMask
5901
5902 ntimerpMask := make([]uint32, maskWords)
5903 copy(ntimerpMask, timerpMask)
5904 timerpMask = ntimerpMask
5905 }
5906 unlock(&allpLock)
5907 }
5908
5909
5910 for i := old; i < nprocs; i++ {
5911 pp := allp[i]
5912 if pp == nil {
5913 pp = new(p)
5914 }
5915 pp.init(i)
5916 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5917 }
5918
5919 gp := getg()
5920 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5921
5922 gp.m.p.ptr().status = _Prunning
5923 gp.m.p.ptr().mcache.prepareForSweep()
5924 } else {
5925
5926
5927
5928
5929
5930 if gp.m.p != 0 {
5931 trace := traceAcquire()
5932 if trace.ok() {
5933
5934
5935
5936 trace.GoSched()
5937 trace.ProcStop(gp.m.p.ptr())
5938 traceRelease(trace)
5939 }
5940 gp.m.p.ptr().m = 0
5941 }
5942 gp.m.p = 0
5943 pp := allp[0]
5944 pp.m = 0
5945 pp.status = _Pidle
5946 acquirep(pp)
5947 trace := traceAcquire()
5948 if trace.ok() {
5949 trace.GoStart()
5950 traceRelease(trace)
5951 }
5952 }
5953
5954
5955 mcache0 = nil
5956
5957
5958 for i := nprocs; i < old; i++ {
5959 pp := allp[i]
5960 pp.destroy()
5961
5962 }
5963
5964
5965 if int32(len(allp)) != nprocs {
5966 lock(&allpLock)
5967 allp = allp[:nprocs]
5968 idlepMask = idlepMask[:maskWords]
5969 timerpMask = timerpMask[:maskWords]
5970 unlock(&allpLock)
5971 }
5972
5973 var runnablePs *p
5974 for i := nprocs - 1; i >= 0; i-- {
5975 pp := allp[i]
5976 if gp.m.p.ptr() == pp {
5977 continue
5978 }
5979 pp.status = _Pidle
5980 if runqempty(pp) {
5981 pidleput(pp, now)
5982 } else {
5983 pp.m.set(mget())
5984 pp.link.set(runnablePs)
5985 runnablePs = pp
5986 }
5987 }
5988 stealOrder.reset(uint32(nprocs))
5989 var int32p *int32 = &gomaxprocs
5990 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5991 if old != nprocs {
5992
5993 gcCPULimiter.resetCapacity(now, nprocs)
5994 }
5995 return runnablePs
5996 }
5997
5998
5999
6000
6001
6002
6003
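// Associate p and the current m.
//
// This function is allowed to have write barriers even if the caller
// isn't because it immediately acquires pp.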
6004 func acquirep(pp *p) {
6005
6006 wirep(pp)
6007
6008
6009
6010
6011
6012 pp.mcache.prepareForSweep()
6013
6014 trace := traceAcquire()
6015 if trace.ok() {
6016 trace.ProcStart()
6017 traceRelease(trace)
6018 }
6019 }
6020
6021
6022
6023
6024
6025
6026
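// wirep is the first step of acquirep, which actually associates the
// current M to pp. This is broken out so we can disallow write
// barriers for this part, since we don't yet have a P.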
6027 func wirep(pp *p) {
6028 gp := getg()
6029
6030 if gp.m.p != 0 {
6031
6032
6033 systemstack(func() {
6034 throw("wirep: already in go")
6035 })
6036 }
6037 if pp.m != 0 || pp.status != _Pidle {
6038
6039
6040 systemstack(func() {
6041 id := int64(0)
6042 if pp.m != 0 {
6043 id = pp.m.ptr().id
6044 }
6045 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6046 throw("wirep: invalid p state")
6047 })
6048 }
6049 gp.m.p.set(pp)
6050 pp.m.set(gp.m)
6051 pp.status = _Prunning
6052 }
6053
6054
6055 func releasep() *p {
6056 trace := traceAcquire()
6057 if trace.ok() {
6058 trace.ProcStop(getg().m.p.ptr())
6059 traceRelease(trace)
6060 }
6061 return releasepNoTrace()
6062 }
6063
6064
6065 func releasepNoTrace() *p {
6066 gp := getg()
6067
6068 if gp.m.p == 0 {
6069 throw("releasep: invalid arg")
6070 }
6071 pp := gp.m.p.ptr()
6072 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6073 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6074 throw("releasep: invalid p state")
6075 }
6076 gp.m.p = 0
6077 pp.m = 0
6078 pp.status = _Pidle
6079 return pp
6080 }
6081
6082 func incidlelocked(v int32) {
6083 lock(&sched.lock)
6084 sched.nmidlelocked += v
6085 if v > 0 {
6086 checkdead()
6087 }
6088 unlock(&sched.lock)
6089 }
6090
6091
6092
6093
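// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.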
6094 func checkdead() {
6095 assertLockHeld(&sched.lock)
6096
6097
6098
6099
6100
6101
6102 if (islibrary || isarchive) && GOARCH != "wasm" {
6103 return
6104 }
6105
6106
6107
6108
6109
6110 if panicking.Load() > 0 {
6111 return
6112 }
6113
6114
6115
6116
6117
6118 var run0 int32
6119 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6120 run0 = 1
6121 }
6122
6123 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6124 if run > run0 {
6125 return
6126 }
6127 if run < 0 {
6128 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6129 unlock(&sched.lock)
6130 throw("checkdead: inconsistent counts")
6131 }
6132
6133 grunning := 0
6134 forEachG(func(gp *g) {
6135 if isSystemGoroutine(gp, false) {
6136 return
6137 }
6138 s := readgstatus(gp)
6139 switch s &^ _Gscan {
6140 case _Gwaiting,
6141 _Gpreempted:
6142 grunning++
6143 case _Grunnable,
6144 _Grunning,
6145 _Gsyscall:
6146 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6147 unlock(&sched.lock)
6148 throw("checkdead: runnable g")
6149 }
6150 })
6151 if grunning == 0 {
6152 unlock(&sched.lock)
6153 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6154 }
6155
6156
6157 if faketime != 0 {
6158 if when := timeSleepUntil(); when < maxWhen {
6159 faketime = when
6160
6161
6162 pp, _ := pidleget(faketime)
6163 if pp == nil {
6164
6165
6166 unlock(&sched.lock)
6167 throw("checkdead: no p for timer")
6168 }
6169 mp := mget()
6170 if mp == nil {
6171
6172
6173 unlock(&sched.lock)
6174 throw("checkdead: no m for timer")
6175 }
6176
6177
6178
6179 sched.nmspinning.Add(1)
6180 mp.spinning = true
6181 mp.nextp.set(pp)
6182 notewakeup(&mp.park)
6183 return
6184 }
6185 }
6186
6187
6188 for _, pp := range allp {
6189 if len(pp.timers.heap) > 0 {
6190 return
6191 }
6192 }
6193
6194 unlock(&sched.lock)
6195 fatal("all goroutines are asleep - deadlock!")
6196 }
6197
6198
6199
6200
6201
6202
6203 var forcegcperiod int64 = 2 * 60 * 1e9
6204
6205
6206
6207
6208 const haveSysmon = GOARCH != "wasm"
6209
6210
6211
6212
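// Always runs without a P, so write barriers are not allowed.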
6213 func sysmon() {
6214 lock(&sched.lock)
6215 sched.nmsys++
6216 checkdead()
6217 unlock(&sched.lock)
6218
6219 lastgomaxprocs := int64(0)
6220 lasttrace := int64(0)
6221 idle := 0
6222 delay := uint32(0)
6223
6224 for {
6225 if idle == 0 {
6226 delay = 20
6227 } else if idle > 50 {
6228 delay *= 2
6229 }
6230 if delay > 10*1000 {
6231 delay = 10 * 1000
6232 }
6233 usleep(delay)
6234
6235
6236
6237
6238
6239
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250 now := nanotime()
6251 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6252 lock(&sched.lock)
6253 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6254 syscallWake := false
6255 next := timeSleepUntil()
6256 if next > now {
6257 sched.sysmonwait.Store(true)
6258 unlock(&sched.lock)
6259
6260
6261 sleep := forcegcperiod / 2
6262 if next-now < sleep {
6263 sleep = next - now
6264 }
6265 shouldRelax := sleep >= osRelaxMinNS
6266 if shouldRelax {
6267 osRelax(true)
6268 }
6269 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6270 if shouldRelax {
6271 osRelax(false)
6272 }
6273 lock(&sched.lock)
6274 sched.sysmonwait.Store(false)
6275 noteclear(&sched.sysmonnote)
6276 }
6277 if syscallWake {
6278 idle = 0
6279 delay = 20
6280 }
6281 }
6282 unlock(&sched.lock)
6283 }
6284
6285 lock(&sched.sysmonlock)
6286
6287
6288 now = nanotime()
6289
6290
6291 if *cgo_yield != nil {
6292 asmcgocall(*cgo_yield, nil)
6293 }
6294
6295 lastpoll := sched.lastpoll.Load()
6296 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6297 sched.lastpoll.CompareAndSwap(lastpoll, now)
6298 list, delta := netpoll(0)
6299 if !list.empty() {
6300
6301
6302
6303
6304
6305
6306
6307 incidlelocked(-1)
6308 injectglist(&list)
6309 incidlelocked(1)
6310 netpollAdjustWaiters(delta)
6311 }
6312 }
6313
6314 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6315 sysmonUpdateGOMAXPROCS()
6316 lastgomaxprocs = now
6317 }
6318 if scavenger.sysmonWake.Load() != 0 {
6319
6320 scavenger.wake()
6321 }
6322
6323
6324 if retake(now) != 0 {
6325 idle = 0
6326 } else {
6327 idle++
6328 }
6329
6330 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6331 lock(&forcegc.lock)
6332 forcegc.idle.Store(false)
6333 var list gList
6334 list.push(forcegc.g)
6335 injectglist(&list)
6336 unlock(&forcegc.lock)
6337 }
6338 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6339 lasttrace = now
6340 schedtrace(debug.scheddetail > 0)
6341 }
6342 unlock(&sched.sysmonlock)
6343 }
6344 }
6345
6346 type sysmontick struct {
6347 schedtick uint32
6348 syscalltick uint32
6349 schedwhen int64
6350 syscallwhen int64
6351 }
6352
6353
6354
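// forcePreemptNS is the time slice given to a goroutine before it is preempted.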
6355 const forcePreemptNS = 10 * 1000 * 1000
6356
6357 func retake(now int64) uint32 {
6358 n := 0
6359
6360
6361 lock(&allpLock)
6362
6363
6364
6365 for i := 0; i < len(allp); i++ {
6366 pp := allp[i]
6367 if pp == nil {
6368
6369
6370 continue
6371 }
6372 pd := &pp.sysmontick
6373 s := pp.status
6374 sysretake := false
6375 if s == _Prunning || s == _Psyscall {
6376
6377
6378
6379
6380 t := int64(pp.schedtick)
6381 if int64(pd.schedtick) != t {
6382 pd.schedtick = uint32(t)
6383 pd.schedwhen = now
6384 } else if pd.schedwhen+forcePreemptNS <= now {
6385 preemptone(pp)
6386
6387
6388 sysretake = true
6389 }
6390 }
6391 if s == _Psyscall {
6392
6393 t := int64(pp.syscalltick)
6394 if !sysretake && int64(pd.syscalltick) != t {
6395 pd.syscalltick = uint32(t)
6396 pd.syscallwhen = now
6397 continue
6398 }
6399
6400
6401
6402 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6403 continue
6404 }
6405
6406 unlock(&allpLock)
6407
6408
6409
6410
6411 incidlelocked(-1)
6412 trace := traceAcquire()
6413 if atomic.Cas(&pp.status, s, _Pidle) {
6414 if trace.ok() {
6415 trace.ProcSteal(pp, false)
6416 traceRelease(trace)
6417 }
6418 sched.nGsyscallNoP.Add(1)
6419 n++
6420 pp.syscalltick++
6421 handoffp(pp)
6422 } else if trace.ok() {
6423 traceRelease(trace)
6424 }
6425 incidlelocked(1)
6426 lock(&allpLock)
6427 }
6428 }
6429 unlock(&allpLock)
6430 return uint32(n)
6431 }
6432
6433
6434
6435
6436
6437
6438 func preemptall() bool {
6439 res := false
6440 for _, pp := range allp {
6441 if pp.status != _Prunning {
6442 continue
6443 }
6444 if preemptone(pp) {
6445 res = true
6446 }
6447 }
6448 return res
6449 }
6450
6451
6452
6453
6454
6455
6456
6457
6458
6459
6460
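// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform the
// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.
// Returns true if preemption request was issued.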
6461 func preemptone(pp *p) bool {
6462 mp := pp.m.ptr()
6463 if mp == nil || mp == getg().m {
6464 return false
6465 }
6466 gp := mp.curg
6467 if gp == nil || gp == mp.g0 {
6468 return false
6469 }
6470
6471 gp.preempt = true
6472
6473
6474
6475
6476
6477 gp.stackguard0 = stackPreempt
6478
6479
6480 if preemptMSupported && debug.asyncpreemptoff == 0 {
6481 pp.preempt = true
6482 preemptM(mp)
6483 }
6484
6485 return true
6486 }
6487
6488 var starttime int64
6489
6490 func schedtrace(detailed bool) {
6491 now := nanotime()
6492 if starttime == 0 {
6493 starttime = now
6494 }
6495
6496 lock(&sched.lock)
6497 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6498 if detailed {
6499 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6500 }
6501
6502
6503
6504 for i, pp := range allp {
6505 h := atomic.Load(&pp.runqhead)
6506 t := atomic.Load(&pp.runqtail)
6507 if detailed {
6508 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6509 mp := pp.m.ptr()
6510 if mp != nil {
6511 print(mp.id)
6512 } else {
6513 print("nil")
6514 }
6515 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6516 } else {
6517
6518
6519 print(" ")
6520 if i == 0 {
6521 print("[ ")
6522 }
6523 print(t - h)
6524 if i == len(allp)-1 {
6525 print(" ]")
6526 }
6527 }
6528 }
6529
6530 if !detailed {
6531
6532 print(" schedticks=[ ")
6533 for _, pp := range allp {
6534 print(pp.schedtick)
6535 print(" ")
6536 }
6537 print("]\n")
6538 }
6539
6540 if !detailed {
6541 unlock(&sched.lock)
6542 return
6543 }
6544
6545 for mp := allm; mp != nil; mp = mp.alllink {
6546 pp := mp.p.ptr()
6547 print(" M", mp.id, ": p=")
6548 if pp != nil {
6549 print(pp.id)
6550 } else {
6551 print("nil")
6552 }
6553 print(" curg=")
6554 if mp.curg != nil {
6555 print(mp.curg.goid)
6556 } else {
6557 print("nil")
6558 }
6559 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6560 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6561 print(lockedg.goid)
6562 } else {
6563 print("nil")
6564 }
6565 print("\n")
6566 }
6567
6568 forEachG(func(gp *g) {
6569 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6570 if gp.m != nil {
6571 print(gp.m.id)
6572 } else {
6573 print("nil")
6574 }
6575 print(" lockedm=")
6576 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6577 print(lockedm.id)
6578 } else {
6579 print("nil")
6580 }
6581 print("\n")
6582 })
6583 unlock(&sched.lock)
6584 }
6585
6586 type updateMaxProcsGState struct {
6587 lock mutex
6588 g *g
6589 idle atomic.Bool
6590
6591
6592 procs int32
6593 }
6594
6595 var (
6596
6597
6598 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6599
6600
6601
6602 updateMaxProcsG updateMaxProcsGState
6603
6604
6605
6606
6607
6608
6609
6610
6611
6612
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647
6648
6649
6650
6651 computeMaxProcsLock mutex
6652 )
6653
6654
6655
6656
6657 func defaultGOMAXPROCSUpdateEnable() {
6658 if debug.updatemaxprocs == 0 {
6659
6660
6661
6662
6663
6664
6665
6666
6667
6668
6669
6670 updatemaxprocs.IncNonDefault()
6671 return
6672 }
6673
6674 go updateMaxProcsGoroutine()
6675 }
6676
6677 func updateMaxProcsGoroutine() {
6678 updateMaxProcsG.g = getg()
6679 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
6680 for {
6681 lock(&updateMaxProcsG.lock)
6682 if updateMaxProcsG.idle.Load() {
6683 throw("updateMaxProcsGoroutine: phase error")
6684 }
6685 updateMaxProcsG.idle.Store(true)
6686 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
6687
6688
6689 stw := stopTheWorldGC(stwGOMAXPROCS)
6690
6691
6692 lock(&sched.lock)
6693 custom := sched.customGOMAXPROCS
6694 unlock(&sched.lock)
6695 if custom {
6696 startTheWorldGC(stw)
6697 return
6698 }
6699
6700
6701
6702
6703
6704 newprocs = updateMaxProcsG.procs
6705 lock(&sched.lock)
6706 sched.customGOMAXPROCS = false
6707 unlock(&sched.lock)
6708
6709 startTheWorldGC(stw)
6710 }
6711 }
6712
6713 func sysmonUpdateGOMAXPROCS() {
6714
6715 lock(&computeMaxProcsLock)
6716
6717
6718 lock(&sched.lock)
6719 custom := sched.customGOMAXPROCS
6720 curr := gomaxprocs
6721 unlock(&sched.lock)
6722 if custom {
6723 unlock(&computeMaxProcsLock)
6724 return
6725 }
6726
6727
6728 procs := defaultGOMAXPROCS(0)
6729 unlock(&computeMaxProcsLock)
6730 if procs == curr {
6731
6732 return
6733 }
6734
6735
6736
6737
6738 if updateMaxProcsG.idle.Load() {
6739 lock(&updateMaxProcsG.lock)
6740 updateMaxProcsG.procs = procs
6741 updateMaxProcsG.idle.Store(false)
6742 var list gList
6743 list.push(updateMaxProcsG.g)
6744 injectglist(&list)
6745 unlock(&updateMaxProcsG.lock)
6746 }
6747 }
6748
6749
6750
6751
6752
6753
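// schedEnableUser enables or disables the scheduling of user goroutines.
//
// This does not stop already running user goroutines, so the caller should
// first stop the world when disabling user goroutines.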
6754 func schedEnableUser(enable bool) {
6755 lock(&sched.lock)
6756 if sched.disable.user == !enable {
6757 unlock(&sched.lock)
6758 return
6759 }
6760 sched.disable.user = !enable
6761 if enable {
6762 n := sched.disable.runnable.size
6763 globrunqputbatch(&sched.disable.runnable)
6764 unlock(&sched.lock)
6765 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6766 startm(nil, false, false)
6767 }
6768 } else {
6769 unlock(&sched.lock)
6770 }
6771 }
6772
6773
6774
6775
6776
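// schedEnabled reports whether gp should be scheduled. It returns false only
// when user goroutine scheduling is disabled and gp is not a system goroutine.
//
// sched.lock must be held.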
6777 func schedEnabled(gp *g) bool {
6778 assertLockHeld(&sched.lock)
6779
6780 if sched.disable.user {
6781 return isSystemGoroutine(gp, true)
6782 }
6783 return true
6784 }
6785
6786
6787
6788
6789
6790
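// mput puts mp on the midle list.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.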
6791 func mput(mp *m) {
6792 assertLockHeld(&sched.lock)
6793
6794 mp.schedlink = sched.midle
6795 sched.midle.set(mp)
6796 sched.nmidle++
6797 checkdead()
6798 }
6799
6800
6801
6802
6803
6804
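// mget tries to get an m from the midle list, returning nil if none is available.
// sched.lock must be held.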
6805 func mget() *m {
6806 assertLockHeld(&sched.lock)
6807
6808 mp := sched.midle.ptr()
6809 if mp != nil {
6810 sched.midle = mp.schedlink
6811 sched.nmidle--
6812 }
6813 return mp
6814 }
6815
6816
6817
6818
6819
6820
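// globrunqput puts gp on the tail of the global runnable queue.
// sched.lock must be held.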
6821 func globrunqput(gp *g) {
6822 assertLockHeld(&sched.lock)
6823
6824 sched.runq.pushBack(gp)
6825 }
6826
6827
6828
6829
6830
6831
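// globrunqputhead puts gp at the head of the global runnable queue.
// sched.lock must be held.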
6832 func globrunqputhead(gp *g) {
6833 assertLockHeld(&sched.lock)
6834
6835 sched.runq.push(gp)
6836 }
6837
6838
6839
6840
6841
6842
6843
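// globrunqputbatch appends a batch of runnable goroutines to the global
// runnable queue and clears *batch.
// sched.lock must be held.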
6844 func globrunqputbatch(batch *gQueue) {
6845 assertLockHeld(&sched.lock)
6846
6847 sched.runq.pushBackAll(*batch)
6848 *batch = gQueue{}
6849 }
6850
6851
6852
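// globrunqget takes one g off the global runnable queue, returning nil if
// the queue is empty.
// sched.lock must be held.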
6853 func globrunqget() *g {
6854 assertLockHeld(&sched.lock)
6855
6856 if sched.runq.size == 0 {
6857 return nil
6858 }
6859
6860 return sched.runq.pop()
6861 }
6862
6863
6864
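// globrunqgetbatch takes up to n gs from the global runnable queue: one is
// returned directly and the rest are placed in q.
// sched.lock must be held.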
6865 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
6866 assertLockHeld(&sched.lock)
6867
6868 if sched.runq.size == 0 {
6869 return
6870 }
6871
6872 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
6873
6874 gp = sched.runq.pop()
6875 n--
6876
6877 for ; n > 0; n-- {
6878 gp1 := sched.runq.pop()
6879 q.pushBack(gp1)
6880 }
6881 return
6882 }
6883
6884
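// pMask is an atomic bitmask indexed by P id.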
6885 type pMask []uint32
6886
6887
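// read returns true if P id's bit is set.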
6888 func (p pMask) read(id uint32) bool {
6889 word := id / 32
6890 mask := uint32(1) << (id % 32)
6891 return (atomic.Load(&p[word]) & mask) != 0
6892 }
6893
6894
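// set sets P id's bit.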
6895 func (p pMask) set(id int32) {
6896 word := id / 32
6897 mask := uint32(1) << (id % 32)
6898 atomic.Or(&p[word], mask)
6899 }
6900
6901
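// clear clears P id's bit.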
6902 func (p pMask) clear(id int32) {
6903 word := id / 32
6904 mask := uint32(1) << (id % 32)
6905 atomic.And(&p[word], ^mask)
6906 }
6907
6908
6909
6910
6911
6912
6913
6914
6915
6916
6917
6918
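// pidleput puts pp on the _Pidle list.
//
// now must be a relatively recent call to nanotime() or zero. The return
// value is now, or the current time if now was zero.
//
// sched.lock must be held, and pp must have an empty run queue.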
6919 func pidleput(pp *p, now int64) int64 {
6920 assertLockHeld(&sched.lock)
6921
6922 if !runqempty(pp) {
6923 throw("pidleput: P has non-empty run queue")
6924 }
6925 if now == 0 {
6926 now = nanotime()
6927 }
6928 if pp.timers.len.Load() == 0 {
6929 timerpMask.clear(pp.id)
6930 }
6931 idlepMask.set(pp.id)
6932 pp.link = sched.pidle
6933 sched.pidle.set(pp)
6934 sched.npidle.Add(1)
6935 if !pp.limiterEvent.start(limiterEventIdle, now) {
6936 throw("must be able to track idle limiter event")
6937 }
6938 return now
6939 }
6940
6941
6942
6943
6944
6945
6946
6947
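// pidleget tries to get a p from the _Pidle list, acquiring ownership.
//
// sched.lock must be held.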
6948 func pidleget(now int64) (*p, int64) {
6949 assertLockHeld(&sched.lock)
6950
6951 pp := sched.pidle.ptr()
6952 if pp != nil {
6953
6954 if now == 0 {
6955 now = nanotime()
6956 }
6957 timerpMask.set(pp.id)
6958 idlepMask.clear(pp.id)
6959 sched.pidle = pp.link
6960 sched.npidle.Add(-1)
6961 pp.limiterEvent.stop(limiterEventIdle, now)
6962 }
6963 return pp, now
6964 }
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974
6975
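// pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership.
// On failure it sets sched.needspinning so that running Ps hand off work to a
// spinning M instead of going idle.
//
// sched.lock must be held.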
6976 func pidlegetSpinning(now int64) (*p, int64) {
6977 assertLockHeld(&sched.lock)
6978
6979 pp, now := pidleget(now)
6980 if pp == nil {
6981
6982
6983
6984 sched.needspinning.Store(1)
6985 return nil, now
6986 }
6987
6988 return pp, now
6989 }
6990
6991
6992
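// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.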
6993 func runqempty(pp *p) bool {
6994
6995
6996
6997
6998 for {
6999 head := atomic.Load(&pp.runqhead)
7000 tail := atomic.Load(&pp.runqtail)
7001 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7002 if tail == atomic.Load(&pp.runqtail) {
7003 return head == tail && runnext == 0
7004 }
7005 }
7006 }
7007
7008
7009
7010
7011
7012
7013
7014
7015
7016
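// To shake out latent assumptions about scheduling order, some randomness is
// introduced into scheduling decisions when running with the race detector.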
7017 const randomizeScheduler = raceenabled
7018
7019
7020
7021
7022
7023
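// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the pp.runnext slot.
// If the run queue is full, g is put on the global queue.
// Executed only by the owner P.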
7024 func runqput(pp *p, gp *g, next bool) {
7025 if !haveSysmon && next {
7026
7027
7028
7029
7030
7031
7032
7033
7034 next = false
7035 }
7036 if randomizeScheduler && next && randn(2) == 0 {
7037 next = false
7038 }
7039
7040 if next {
7041 retryNext:
7042 oldnext := pp.runnext
7043 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7044 goto retryNext
7045 }
7046 if oldnext == 0 {
7047 return
7048 }
7049
7050 gp = oldnext.ptr()
7051 }
7052
7053 retry:
7054 h := atomic.LoadAcq(&pp.runqhead)
7055 t := pp.runqtail
7056 if t-h < uint32(len(pp.runq)) {
7057 pp.runq[t%uint32(len(pp.runq))].set(gp)
7058 atomic.StoreRel(&pp.runqtail, t+1)
7059 return
7060 }
7061 if runqputslow(pp, gp, h, t) {
7062 return
7063 }
7064
7065 goto retry
7066 }
7067
7068
7069
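// runqputslow puts gp and a batch of work from the local runnable queue on
// the global queue.
// Executed only by the owner P.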
7070 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7071 var batch [len(pp.runq)/2 + 1]*g
7072
7073
7074 n := t - h
7075 n = n / 2
7076 if n != uint32(len(pp.runq)/2) {
7077 throw("runqputslow: queue is not full")
7078 }
7079 for i := uint32(0); i < n; i++ {
7080 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7081 }
7082 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7083 return false
7084 }
7085 batch[n] = gp
7086
7087 if randomizeScheduler {
7088 for i := uint32(1); i <= n; i++ {
7089 j := cheaprandn(i + 1)
7090 batch[i], batch[j] = batch[j], batch[i]
7091 }
7092 }
7093
7094
7095 for i := uint32(0); i < n; i++ {
7096 batch[i].schedlink.set(batch[i+1])
7097 }
7098
7099 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7100
7101
7102 lock(&sched.lock)
7103 globrunqputbatch(&q)
7104 unlock(&sched.lock)
7105 return true
7106 }
7107
7108
7109
7110
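// runqputbatch tries to put all the Gs on q on the local runnable queue.
// Gs that do not fit remain on q.
// Executed only by the owner P.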
7111 func runqputbatch(pp *p, q *gQueue) {
7112 if q.empty() {
7113 return
7114 }
7115 h := atomic.LoadAcq(&pp.runqhead)
7116 t := pp.runqtail
7117 n := uint32(0)
7118 for !q.empty() && t-h < uint32(len(pp.runq)) {
7119 gp := q.pop()
7120 pp.runq[t%uint32(len(pp.runq))].set(gp)
7121 t++
7122 n++
7123 }
7124
7125 if randomizeScheduler {
7126 off := func(o uint32) uint32 {
7127 return (pp.runqtail + o) % uint32(len(pp.runq))
7128 }
7129 for i := uint32(1); i < n; i++ {
7130 j := cheaprandn(i + 1)
7131 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7132 }
7133 }
7134
7135 atomic.StoreRel(&pp.runqtail, t)
7136
7137 return
7138 }
7139
7140
7141
7142
7143
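// runqget gets a g from the local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice; otherwise, it should start a new time slice.
// Executed only by the owner P.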
7144 func runqget(pp *p) (gp *g, inheritTime bool) {
7145
7146 next := pp.runnext
7147
7148
7149
7150 if next != 0 && pp.runnext.cas(next, 0) {
7151 return next.ptr(), true
7152 }
7153
7154 for {
7155 h := atomic.LoadAcq(&pp.runqhead)
7156 t := pp.runqtail
7157 if t == h {
7158 return nil, false
7159 }
7160 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7161 if atomic.CasRel(&pp.runqhead, h, h+1) {
7162 return gp, false
7163 }
7164 }
7165 }
7166
7167
7168
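// runqdrain drains the local runnable queue of pp and returns all goroutines in it.
// Executed only by the owner P.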
7169 func runqdrain(pp *p) (drainQ gQueue) {
7170 oldNext := pp.runnext
7171 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7172 drainQ.pushBack(oldNext.ptr())
7173 }
7174
7175 retry:
7176 h := atomic.LoadAcq(&pp.runqhead)
7177 t := pp.runqtail
7178 qn := t - h
7179 if qn == 0 {
7180 return
7181 }
7182 if qn > uint32(len(pp.runq)) {
7183 goto retry
7184 }
7185
7186 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7187 goto retry
7188 }
7189
7190
7191
7192
7193
7194
7195
7196
7197 for i := uint32(0); i < qn; i++ {
7198 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7199 drainQ.pushBack(gp)
7200 }
7201 return
7202 }
7203
7204
7205
7206
7207
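// runqgrab grabs a batch of goroutines from pp's runnable queue into batch,
// a ring buffer starting at batchHead, and returns the number of grabbed
// goroutines.
// Can be executed by any P.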
7208 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7209 for {
7210 h := atomic.LoadAcq(&pp.runqhead)
7211 t := atomic.LoadAcq(&pp.runqtail)
7212 n := t - h
7213 n = n - n/2
7214 if n == 0 {
7215 if stealRunNextG {
7216
7217 if next := pp.runnext; next != 0 {
7218 if pp.status == _Prunning {
7219
7220
7221
7222
7223
7224
7225
7226
7227
7228
7229 if !osHasLowResTimer {
7230 usleep(3)
7231 } else {
7232
7233
7234
7235 osyield()
7236 }
7237 }
7238 if !pp.runnext.cas(next, 0) {
7239 continue
7240 }
7241 batch[batchHead%uint32(len(batch))] = next
7242 return 1
7243 }
7244 }
7245 return 0
7246 }
7247 if n > uint32(len(pp.runq)/2) {
7248 continue
7249 }
7250 for i := uint32(0); i < n; i++ {
7251 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7252 batch[(batchHead+i)%uint32(len(batch))] = g
7253 }
7254 if atomic.CasRel(&pp.runqhead, h, h+n) {
7255 return n
7256 }
7257 }
7258 }
7259
7260
7261
7262
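// runqsteal steals half of the elements from the local runnable queue of p2
// and puts them onto the local runnable queue of pp.
// Returns one of the stolen elements (or nil if it failed).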
7263 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7264 t := pp.runqtail
7265 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7266 if n == 0 {
7267 return nil
7268 }
7269 n--
7270 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7271 if n == 0 {
7272 return gp
7273 }
7274 h := atomic.LoadAcq(&pp.runqhead)
7275 if t-h+n >= uint32(len(pp.runq)) {
7276 throw("runqsteal: runq overflow")
7277 }
7278 atomic.StoreRel(&pp.runqtail, t+n)
7279 return gp
7280 }
7281
7282
7283
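// A gQueue is a queue of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.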
7284 type gQueue struct {
7285 head guintptr
7286 tail guintptr
7287 size int32
7288 }
7289
7290
7291 func (q *gQueue) empty() bool {
7292 return q.head == 0
7293 }
7294
7295
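// push adds gp to the head of q.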
7296 func (q *gQueue) push(gp *g) {
7297 gp.schedlink = q.head
7298 q.head.set(gp)
7299 if q.tail == 0 {
7300 q.tail.set(gp)
7301 }
7302 q.size++
7303 }
7304
7305
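// pushBack adds gp to the tail of q.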
7306 func (q *gQueue) pushBack(gp *g) {
7307 gp.schedlink = 0
7308 if q.tail != 0 {
7309 q.tail.ptr().schedlink.set(gp)
7310 } else {
7311 q.head.set(gp)
7312 }
7313 q.tail.set(gp)
7314 q.size++
7315 }
7316
7317
7318
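// pushBackAll adds all Gs in q2 to the tail of q. After this, q2 must not be used.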
7319 func (q *gQueue) pushBackAll(q2 gQueue) {
7320 if q2.tail == 0 {
7321 return
7322 }
7323 q2.tail.ptr().schedlink = 0
7324 if q.tail != 0 {
7325 q.tail.ptr().schedlink = q2.head
7326 } else {
7327 q.head = q2.head
7328 }
7329 q.tail = q2.tail
7330 q.size += q2.size
7331 }
7332
7333
7334
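// pop removes and returns the head of queue q. It returns nil if q is empty.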
7335 func (q *gQueue) pop() *g {
7336 gp := q.head.ptr()
7337 if gp != nil {
7338 q.head = gp.schedlink
7339 if q.head == 0 {
7340 q.tail = 0
7341 }
7342 q.size--
7343 }
7344 return gp
7345 }
7346
7347
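// popList takes all Gs in q and returns them as a gList.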
7348 func (q *gQueue) popList() gList {
7349 stack := gList{q.head, q.size}
7350 *q = gQueue{}
7351 return stack
7352 }
7353
7354
7355
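// A gList is a list of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.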
7356 type gList struct {
7357 head guintptr
7358 size int32
7359 }
7360
7361
7362 func (l *gList) empty() bool {
7363 return l.head == 0
7364 }
7365
7366
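// push adds gp to the head of l.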
7367 func (l *gList) push(gp *g) {
7368 gp.schedlink = l.head
7369 l.head.set(gp)
7370 l.size++
7371 }
7372
7373
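// pushAll prepends all Gs in q to l.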
7374 func (l *gList) pushAll(q gQueue) {
7375 if !q.empty() {
7376 q.tail.ptr().schedlink = l.head
7377 l.head = q.head
7378 l.size += q.size
7379 }
7380 }
7381
7382
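// pop removes and returns the head of l. It returns nil if l is empty.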
7383 func (l *gList) pop() *g {
7384 gp := l.head.ptr()
7385 if gp != nil {
7386 l.head = gp.schedlink
7387 l.size--
7388 }
7389 return gp
7390 }
7391
7392
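// setMaxThreads backs runtime/debug.SetMaxThreads: it returns the previous
// thread limit and installs the new one, capped at the maximum int32 value.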
7393 func setMaxThreads(in int) (out int) {
7394 lock(&sched.lock)
7395 out = int(sched.maxmcount)
7396 if in > 0x7fffffff {
7397 sched.maxmcount = 0x7fffffff
7398 } else {
7399 sched.maxmcount = int32(in)
7400 }
7401 checkmcount()
7402 unlock(&sched.lock)
7403 return
7404 }
7405
7406
7407
7408
7409
7410
7411
7412
7413
7414
7415
7416
7417
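// procPin pins the current goroutine to its P by incrementing m.locks
// (disabling preemption) and returns the P's id.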
7418 func procPin() int {
7419 gp := getg()
7420 mp := gp.m
7421
7422 mp.locks++
7423 return int(mp.p.ptr().id)
7424 }
7425
7426
7427
7428
7429
7430
7431
7432
7433
7434
7435
7436
7437
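// procUnpin undoes the effect of procPin by decrementing m.locks.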
7438 func procUnpin() {
7439 gp := getg()
7440 gp.m.locks--
7441 }
7442
7443
7444
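// sync_runtime_procPin is linknamed into package sync as runtime_procPin.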
7445 func sync_runtime_procPin() int {
7446 return procPin()
7447 }
7448
7449
7450
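// sync_runtime_procUnpin is linknamed into package sync as runtime_procUnpin.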
7451 func sync_runtime_procUnpin() {
7452 procUnpin()
7453 }
7454
7455
7456
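// sync_atomic_runtime_procPin is linknamed into package sync/atomic as runtime_procPin.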
7457 func sync_atomic_runtime_procPin() int {
7458 return procPin()
7459 }
7460
7461
7462
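// sync_atomic_runtime_procUnpin is linknamed into package sync/atomic as runtime_procUnpin.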
7463 func sync_atomic_runtime_procUnpin() {
7464 procUnpin()
7465 }
7466
7467
7468
7469
7470
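// internal_sync_runtime_canSpin reports whether active spinning is useful for
// sync.Mutex: spin only a few times, and only on a multicore machine with at
// least one other running P and an empty local run queue.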
7471 func internal_sync_runtime_canSpin(i int) bool {
7472
7473
7474
7475
7476
7477 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7478 return false
7479 }
7480 if p := getg().m.p.ptr(); !runqempty(p) {
7481 return false
7482 }
7483 return true
7484 }
7485
7486
7487
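// internal_sync_runtime_doSpin performs the active spin itself.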
7488 func internal_sync_runtime_doSpin() {
7489 procyield(active_spin_cnt)
7490 }
7491
7492
7493
7494
7495
7496
7497
7498
7499
7500
7501
7502
7503
7504
7505
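// sync_runtime_canSpin is kept as a linkname target for third-party code that
// reaches into the runtime; it forwards to internal_sync_runtime_canSpin.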
7506 func sync_runtime_canSpin(i int) bool {
7507 return internal_sync_runtime_canSpin(i)
7508 }
7509
7510
7511
7512
7513
7514
7515
7516
7517
7518
7519
7520
7521
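// sync_runtime_doSpin is likewise kept as a linkname target; it forwards to
// internal_sync_runtime_doSpin.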
7522 func sync_runtime_doSpin() {
7523 internal_sync_runtime_doSpin()
7524 }
7525
7526 var stealOrder randomOrder
7527
7528
7529
7530
7531
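// randomOrder/randomEnum are helper types for randomized work stealing.
// They allow enumerating all Ps in different pseudo-random orders without
// repetitions: if X and GOMAXPROCS are coprime, the sequence
// (i + X) % GOMAXPROCS visits every P exactly once.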
7532 type randomOrder struct {
7533 count uint32
7534 coprimes []uint32
7535 }
7536
7537 type randomEnum struct {
7538 i uint32
7539 count uint32
7540 pos uint32
7541 inc uint32
7542 }
7543
7544 func (ord *randomOrder) reset(count uint32) {
7545 ord.count = count
7546 ord.coprimes = ord.coprimes[:0]
7547 for i := uint32(1); i <= count; i++ {
7548 if gcd(i, count) == 1 {
7549 ord.coprimes = append(ord.coprimes, i)
7550 }
7551 }
7552 }
7553
7554 func (ord *randomOrder) start(i uint32) randomEnum {
7555 return randomEnum{
7556 count: ord.count,
7557 pos: i % ord.count,
7558 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7559 }
7560 }
7561
7562 func (enum *randomEnum) done() bool {
7563 return enum.i == enum.count
7564 }
7565
7566 func (enum *randomEnum) next() {
7567 enum.i++
7568 enum.pos = (enum.pos + enum.inc) % enum.count
7569 }
7570
7571 func (enum *randomEnum) position() uint32 {
7572 return enum.pos
7573 }
7574
7575 func gcd(a, b uint32) uint32 {
7576 for b != 0 {
7577 a, b = b, a%b
7578 }
7579 return a
7580 }
7581
7582
7583
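// An initTask represents the set of package init functions to run in a module.
// The header is followed in memory by nfns uintptr-sized PCs, one per init
// function; state is 0 (not started), 1 (running), or 2 (done).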
7584 type initTask struct {
7585 state uint32
7586 nfns uint32
7587
7588 }
7589
7590
7591
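// inittrace stores statistics for init functions, collected when
// GODEBUG=inittrace=1 is set.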
7592 var inittrace tracestat
7593
7594 type tracestat struct {
7595 active bool
7596 id uint64
7597 allocs uint64
7598 bytes uint64
7599 }
7600
7601 func doInit(ts []*initTask) {
7602 for _, t := range ts {
7603 doInit1(t)
7604 }
7605 }
7606
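// doInit1 runs a single init task, guarding against recursive initialization
// and recording timing and allocation stats when inittrace is active.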
7607 func doInit1(t *initTask) {
7608 switch t.state {
7609 case 2:
7610 return
7611 case 1:
7612 throw("recursive call during initialization - linker skew")
7613 default:
7614 t.state = 1
7615
7616 var (
7617 start int64
7618 before tracestat
7619 )
7620
7621 if inittrace.active {
7622 start = nanotime()
7623
7624 before = inittrace
7625 }
7626
7627 if t.nfns == 0 {
7628
7629 throw("inittask with no functions")
7630 }
7631
7632 firstFunc := add(unsafe.Pointer(t), 8)
7633 for i := uint32(0); i < t.nfns; i++ {
7634 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7635 f := *(*func())(unsafe.Pointer(&p))
7636 f()
7637 }
7638
7639 if inittrace.active {
7640 end := nanotime()
7641
7642 after := inittrace
7643
7644 f := *(*func())(unsafe.Pointer(&firstFunc))
7645 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7646
7647 var sbuf [24]byte
7648 print("init ", pkg, " @")
7649 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7650 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7651 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7652 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7653 print("\n")
7654 }
7655
7656 t.state = 2
7657 }
7658 }
7659