Source file
src/runtime/proc.go

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/exithook"
	"internal/stringslite"
	"runtime/internal/sys"
	"unsafe"
)

var modinfo string
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115

var (
	m0           m
	g0           g
	mcache0      *mcache
	raceprocctx0 uintptr
	raceFiniLock mutex
)

// This slice records the initializing tasks that need to be
// done to start up the runtime. It is built by the linker.
var runtime_inittasks []*initTask

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	mp := getg().m

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	mp.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// An upper limit for max stack size. Used to avoid random crashes
	// after calling SetMaxStack and trying to allocate a stack that is too big,
	// since stackalloc works with 32-bit sizes.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the world started.
	// Must be before doInit for tracing init.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks) // Must be before defer.

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the initializing tasks. Depending on build mode this
	// list can arrive a few different ways, but it will always
	// contain the init tasks computed by the linker for all the
	// packages in the program (excluding those added at runtime
	// by package plugin).
	for m := &firstmoduledata; m != nil; m = m.next {
		doInit(m.inittasks)
	}

	// Disable init tracing after main init done to avoid overhead
	// of collecting statistics in malloc and newproc.
	inittrace.active = false

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		runExitHooks(0) // run hooks now, since racefini does not return
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	runExitHooks(0)

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}
}

func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}

func runExitHooks(code int) {
	exithook.Run(code)
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// goschedIfBusy yields the processor like gosched, but only does so if
// there are no idle Ps or if we're on the only P and there's nothing in
// the run queue. In both cases, there is freely available idle time.
//
//go:nosplit
func goschedIfBusy() {
	gp := getg()
	// Call gosched if gp.preempt is set; we may be in a tight loop that
	// doesn't otherwise yield.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}
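
// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive.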
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
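
// goready marks gp runnable and queues it on the current P, waking
// another P if necessary. It runs on the system stack so that it is
// safe to call from contexts that must not grow the user stack.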
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}

func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from the central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of the local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// called from assembly.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include pc and sp in stack trace
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

// gcrash is a fake g that can be used when crashing due to bad
// stack conditions.
var gcrash g

var crashingG atomic.Pointer[g]
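
// Switch to crashstack and call fn, with special handling of
// concurrent and recursive cases.
//
// Nosplit as it is called in a bad stack condition (we know
// morestack would fail).
//
//go:nosplit
//go:nowritebarrierrec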
func switchToCrashStack(fn func()) {
	me := getg()
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// recursive crashing. too bad.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another g is crashing. Give it some time, hopefully it will finish traceback.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}

// Disable crash stack on Windows for now. Apparently, SEH exception
// handling works badly on the crash stack.
const crashStackImplemented = GOOS != "windows"

//go:noescape
func switchToCrashStack0(fn func()) // in assembly

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}

var (
	// allgs contains all Gs ever created (including dead Gs), and thus
	// never shrinks.
	//
	// Access via the slice is protected by allglock or stop-the-world.
	// Readers that cannot take the lock may (carefully!) use the atomic
	// variables below.
	allglock mutex
	allgs    []*g

	// allglen and allgptr are atomic variables that contain len(allgs) and
	// &allgs[0] respectively. Proper ordering depends on totally-ordered
	// loads and stores. Writes are protected by allglock.
	//
	// allgptr is updated before allglen. Readers should read allglen
	// before allgptr to ensure that allglen is always <= len(allgptr). New
	// Gs appended during the race can be missed. For a consistent view of
	// all Gs, allglock must be held.
	//
	// allgptr copies should always be stored as a concrete type or
	// unsafe.Pointer, not uintptr, to ensure that GC can still reach it
	// even if it points to a stale array.
	allglen uintptr
	allgptr **g
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	if &allgs[0] != allgptr {
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}

// allGsSnapshot returns a snapshot of the slice of all Gs.
//
// The world must be stopped or allglock must be held.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	// Because the world is stopped or allglock is held, allgadd
	// cannot happen concurrently with this. allgs grows
	// monotonically and existing entries never change, so we can
	// simply return a copy of the slice header. For added safety,
	// we trim everything past len because that can still change.
	return allgs[:len(allgs):len(allgs)]
}

// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}

// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}

// forEachG calls fn on every G from allgs.
//
// forEachG takes a lock to exclude concurrent addition of new Gs.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}

// forEachGRace calls fn on every G from allgs.
//
// forEachGRace avoids locking, but does not exclude addition of new Gs during
// execution, which may be missed.
func forEachGRace(fn func(gp *g)) {
	ptr, length := atomicAllG()
	for i := uintptr(0); i < length; i++ {
		gp := atomicAllGIndex(ptr, i)
		fn(gp)
	}
	return
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
	_GoidCacheBatch = 16
)

// cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be the complete
// value of the GODEBUG environment variable.
func cpuinit(env string) {
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		cpu.DebugOptions = true
	}
	cpu.Initialize(env)

	// Support cpu feature variables are used in code generated by the compiler
	// to guard execution of instructions that can not be assumed to be always supported.
	switch GOARCH {
	case "386", "amd64":
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41
		x86HasFMA = cpu.X86.HasFMA

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS
	}
}

// getGodebugEarly extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and returns it. This function exists to extract GODEBUG
// early before much of the runtime is initialized.
func getGodebugEarly() string {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))

			if stringslite.HasPrefix(s, prefix) {
				env = gostring(p)[len(prefix):]
				break
			}
		}
	}
	return env
}
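
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.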
func schedinit() {
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()
	// Enforce that this lock is always a leaf lock.
	// All of this lock's critical sections should be
	// extremely short.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0))

	// The world starts stopped.
	worldStopped()

	ticks.init() // run as early as possible
	moduledataverify()
	stackinit()
	mallocinit()
	godebug := getGodebugEarly()
	cpuinit(godebug) // must run before alginit
	randinit()       // must run before alginit, mcommoninit
	alginit()        // maps, hash, rand must not be used before this call
	mcommoninit(gp.m, -1)
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules
	stkobjinit()    // must run before GC starts

	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	checkfds()
	parsedebugvars()
	gcinit()

	// Allocate stack space that can be used when crashing due to bad stack
	// conditions, e.g. morestack on g0.
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000

	// if disableMemoryProfiling is set, update MemProfileRate to 0 to turn off memprofile.
	// Note: parsedebugvars may update MemProfileRate, but when disableMemoryProfiling is
	// set to true by the linker, it means that nothing is consuming the profile, it is
	// safe to set MemProfileRate to 0.
	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	// mcommoninit runs before parsedebugvars, so init profstacks again.
	mProfStackInit(gp.m)

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started now, as P's can run.
	worldStarted()

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger. This code just serves
		// to ensure runtime·modinfo is kept in the resulting binary.
		modinfo = ""
	}
}

func dumpgstatus(gp *g) {
	thisg := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}

// sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude extra M's, which are used for cgocallback from threads
	// created in C to complete the callback and return back to the
	// main function.
	//
	// The purpose of the limit is to avoid accidental fork bomb
	// from something like millions of goroutines blocking on system
	// calls, causing the runtime to create millions of threads.
	// By the time this limit is reached, the hosting operating system
	// would have been in a bad state already.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}

// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for user (and is not necessary unwindable).
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mrandinit(mp)

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() and others iterate over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
	mProfStackInit(mp)
}

// mProfStackInit is used to eagerly initialize stack trace buffers for
// profiling. Lazy allocation would have to deal with reentrancy issues in
// malloc and runtime locks for mLockProfile.
func mProfStackInit(mp *m) {
	if debug.profstackdepth == 0 {
		// debug.profstackdepth is set to 0 by the user, or we're being
		// called from schedinit before parsedebugvars.
		return
	}
	mp.profStack = makeProfStackFP()
	mp.mLockProfile.stack = makeProfStackFP()
}

// makeProfStackFP creates a buffer large enough to hold a maximum-sized stack
// trace as well as any additional frames needed for frame pointer unwinding
// with delayed inline expansion.
func makeProfStackFP() []uintptr {
	// The "1" term accounts for the first stack entry being
	// taken up by a "skip" sentinel value for profilers which
	// defer inline frame expansion until the profile is reported.
	// The "maxSkip" term is for frame pointer unwinding, where
	// some of the earliest frames are discarded to account for skipping.
	return make([]uintptr, 1+maxSkip+debug.profstackdepth)
}

// makeProfStack returns a buffer large enough to hold a maximum-sized stack
// trace.
func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }

//go:linkname pprof_makeProfStack
func pprof_makeProfStack() []uintptr { return makeProfStack() }

func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}

func (mp *m) hasCgoOnStack() bool {
	return mp.ncgo > 0 || mp.isextra
}

const (
	// osHasLowResTimer indicates that the platform's internal timer system has
	// a low resolution, typically on the order of 1 ms or more.
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is osHasLowResClock assuming the platform has a
	// low resolution clock. It is the constant form for use in expressions.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock indicates that timestamps produced by nanotime and
	// cputicks have a low resolution, typically on the order of 1 ms or more.
	osHasLowResClock = osHasLowResClockInt > 0
)

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Mark runnable.
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq.
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing atomic.Bool
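
// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.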
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// We are crashing. The state of the Ps may be more useful
		// preserved than a clean freeze: skip freezing and give
		// running goroutines a brief window to reach a safe point
		// before we pause.
		usleep(1000)
		return
	}

	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, casfrom_Gscanstatus.
//
//go:nosplit
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanpreempted:
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	releaseLockRankAndM(lockRankGscan)
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRankAndM(lockRankGscan)
			}
			return r
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
// various latencies on every transition instead of sampling them.
var casgstatusAlwaysTrack = false

// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			// Call on the systemstack to prevent print and throw from counting
			// against the nosplit stack reservation.
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	lockWithRankMayAcquire(nil, lockRankGscan)

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			systemstack(func() {
				// Call on the systemstack to prevent throw from counting
				// against the nosplit stack reservation.
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	if oldval == _Grunning {
		// Track every gTrackingPeriod time a goroutine transitions out of running.
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Handle various kinds of tracking.
	//
	// Currently:
	// - Time spent in runnable.
	// - Time spent blocked on a sync.Mutex or sync.RWMutex.
	switch oldval {
	case _Grunnable:
		// We transitioned out of runnable, so measure how much
		// time we spent in this state and add it to
		// runnableTime.
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock, measure it. Note that because we're
		// sampling, we have to multiply by our sampling period to get
		// an estimate of the absolute value.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock. Write down the timestamp.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// We just transitioned into runnable, so record what
		// time that happened.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// We're transitioning into running, so turn off
		// tracking and record how much time we spent in
		// runnable.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}

// casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	// Set the wait reason before calling casgstatus, because casgstatus will use it.
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}

// casGToWaitingForGC transitions gp from old to _Gwaiting, and sets the wait reason.
// The wait reason must be a valid isWaitingForGC wait reason.
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForGC() {
		throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
	}
	casGToWaiting(gp, old, reason)
}

// casgcopystack transitions gp to _Gcopystack, assuming its status is
// Gwaiting or Grunnable, and returns the old status. We cannot call
// casgstatus directly, because we are racing with an async wakeup that
// might come in from netpoll: if we see Gwaiting from readgstatus, it
// might become Grunnable by the time we reach the cas, and casgstatus
// would loop waiting for a status that never returns.
//
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
//
// This is the only status operation that both changes the status and
// locks the _Gscan bit.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
}

// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting. If successful, the caller is responsible for
// re-scheduling gp.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	gp.waitreason = waitReasonPreempted
	return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
}

// stwReason is an enumeration of reasons the world is stopping.
type stwReason uint8

// Reasons to stop-the-world.
//
// Avoid reusing reasons and add new ones instead.
const (
	stwUnknown                     stwReason = iota // "unknown"
	stwGCMarkTerm                                   // "GC mark termination"
	stwGCSweepTerm                                  // "GC sweep termination"
	stwWriteHeapDump                                // "write heap dump"
	stwGoroutineProfile                             // "goroutine profile"
	stwGoroutineProfileCleanup                      // "goroutine profile cleanup"
	stwAllGoroutinesStack                           // "all goroutines stack trace"
	stwReadMemStats                                 // "read mem stats"
	stwAllThreadsSyscall                            // "AllThreadsSyscall"
	stwGOMAXPROCS                                   // "GOMAXPROCS"
	stwStartTrace                                   // "start trace"
	stwStopTrace                                    // "stop trace"
	stwForTestCountPagesInUse                       // "CountPagesInUse (test)"
	stwForTestReadMetricsSlow                       // "ReadMetricsSlow (test)"
	stwForTestReadMemStatsSlow                      // "ReadMemStatsSlow (test)"
	stwForTestPageCachePagesLeaked                  // "PageCachePagesLeaked (test)"
	stwForTestResetDebugLog                         // "ResetDebugLog (test)"
)

func (r stwReason) String() string {
	return stwReasonStrings[r]
}

func (r stwReason) isGC() bool {
	return r == stwGCMarkTerm || r == stwGCSweepTerm
}

var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}

// worldStop provides context from the stop-the-world required by the
// start-the-world.
type worldStop struct {
	reason           stwReason
	startedStopping  int64
	finishedStopping int64
	stoppingCPUTime  int64
}

// Temporary variable for stopTheWorld, when it can't write to the stack.
//
// Protected by worldsema.
var stopTheWorldContext worldStop
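
// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and records reason as the reason for
// the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume executing.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.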
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		// Mark the goroutine which called stopTheWorld preemptible so its
		// stack may be scanned.
		// This lets a mark worker scan us while we try to stop the world
		// since otherwise we could get in a mutual preemption deadlock.
		// We must not modify anything on the G stack because a stack shrink
		// may occur; a stack shrink is otherwise OK though, because in
		// order to return from this function (and to leave the system stack)
		// we must have preempted all goroutines, including any attempting
		// to scan our stack, in which case, any stack shrinking will
		// have already completed by the time we exit.
		casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
		stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write to stack
		casgstatus(gp, _Gwaiting, _Grunning)
	})
	return stopTheWorldContext
}

// startTheWorld undoes the effects of stopTheWorld.
//
// w must be the worldStop returned by stopTheWorld.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	//
	// Release worldsema with direct handoff to the next waiter, but
	// acquirem so that semrelease1 doesn't try to yield our time.
	//
	// Otherwise if e.g. ReadMemStats is being called in a loop,
	// it might stomp on other attempts to stop the world, such as
	// for starting or ending GC. The operation this blocks is
	// so heavy-weight that we should just try to be as fair as
	// possible here.
	//
	// We don't want to just allow us to get preempted between now
	// and releasing the semaphore because then we keep everyone
	// (including, for example, GCs) waiting longer.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}

// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running. It also blocks a GC from starting
// until startTheWorldGC is called.
func stopTheWorldGC(reason stwReason) worldStop {
	semacquire(&gcsema)
	return stopTheWorld(reason)
}

// startTheWorldGC undoes the effects of stopTheWorldGC.
//
// w must be the worldStop returned by stopTheWorld.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	semrelease(&gcsema)
}

// Holding worldsema grants an M the right to try to stop the world.
var worldsema uint32 = 1

// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
var gcsema uint32 = 1
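
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	var stw worldStop
//	systemstack(func() {
//		stw = stopTheWorldWithSema(reason)
//	})
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(func() {
//		now = startTheWorldWithSema(stw)
//	})
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.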
func stopTheWorldWithSema(reason stwReason) worldStop {
	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime() // exclude time waiting for sched.lock from start and total time metrics.
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()
	// stop current P
	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--
	// try to retake all P's in Psyscall status
	trace = traceAcquire()
	for _, pp := range allp {
		s := pp.status
		if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
			if trace.ok() {
				trace.ProcSteal(pp, false)
			}
			pp.syscalltick++
			pp.gcStopTime = nanotime()
			sched.stopwait--
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// stop idle P's
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Double-check we actually stopped everything, and all the invariants hold.
	// Also accumulate all the time spent by each P in _Pgcstop up to the point
	// where everything was stopped. This will be accumulated into the total pause
	// CPU time by the caller.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// Some other thread is panicking. This can cause the
		// sanity checks above to fail if the panic happens in
		// the signal handler on a stopped thread. Either way,
		// we should halt this thread.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}
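
// reason is the same STW reason passed to stopTheWorld. start is the start
// time returned by stopTheWorld.
//
// now is the current time; prefer to pass 0 to capture a fresh timestamp.
//
// startTheWorldWithSema returns now.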
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		list, delta := netpoll(0) // non-blocking
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p, -1)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	wakep()

	releasem(mp)

	return now
}

// usesLibcall indicates whether this runtime performs system calls
// via libcall.
func usesLibcall() bool {
	switch GOOS {
	case "aix", "darwin", "illumos", "ios", "solaris", "windows":
		return true
	case "openbsd":
		return GOARCH != "mips64"
	}
	return false
}

// mStackIsSystemAllocated indicates whether this runtime starts on a
// system-allocated stack.
func mStackIsSystemAllocated() bool {
	switch GOOS {
	case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
		return true
	case "openbsd":
		return GOARCH != "mips64"
	}
	return false
}

// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
func mstart()
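
// mstart0 is the Go entry-point for new Ms.
// It may run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec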
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		//
		// Note: these bounds may not be very accurate.
		// We set hi to &size, but there are things above
		// it. The 1024 is supposed to compensate this,
		// but is somewhat arbitrary.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		gp.stack.lo = gp.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	gp.stackguard0 = gp.stack.lo + stackGuard
	// This is the g0, so we can also call go:systemstack
	// functions, which check stackguard1.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
		// the stack, but put it in gp.stack before mstart,
		// so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}

// The go:noinline is to guarantee the getcallerpc/getcallersp below are safe,
// so that we can set up g0.sched to return to the call of mstart1 above.
//
//go:noinline
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Set up m.g0.sched as a label returning to just
	// after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
	// We're never coming back to mstart1 after we call schedule,
	// so other calls can reuse the current frame.
	// And goexit0 does a gogo that needs to return from mstart1
	// and let mstart0 exit the thread.
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = getcallerpc()
	gp.sched.sp = getcallersp()

	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if gp.m == &m0 {
		mstartm0()
	}

	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}

// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
//
//go:yeswritebarrierrec
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// An extra M is also needed on Windows for callbacks created by
	// syscall.NewCallback.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}

// mPark causes a thread to park itself, returning once woken.
//
//go:nosplit
func mPark() {
	gp := getg()
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
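
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec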
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates signal handling.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		// On some platforms, when calling into VDSO (e.g. nanotime)
		// we store our g on the gsignal stack, if there is one.
		// Now the stack is freed, unlink it from the m, so we
		// won't write to it when calling VDSO code.
		mp.gsignal = nil
	}

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Events must not be traced after this point.

	// Delay reaping m until it's done with the stack.
	//
	// Put mp on the free list, though it will not be reaped while freeWait
	// is freeMWait. mp is no longer reachable via allm, so even if it is
	// on an OS stack, we must keep a reference to mp alive so that the GC
	// doesn't free mp while we are still using it.
	//
	// Note that the free list must not be linked through alllink because
	// some functions walk allm without locking, so may be using alllink.
	//
	// N.B. It's important that the M appears on the free list simultaneously
	// with it being removed so that the tracer can find it.
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// Make sure pendingPreemptSignals is correct when an M exits.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Destroy all allocated resources. After this is called, we may no
	// longer take any locks.
	mdestroy(mp)

	if osStack {
		// No more uses of mp, so it is safe to drop the reference.
		mp.freeWait.Store(freeMRef)

		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&mp.freeWait)
}
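
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// It does not stop the world: each P runs fn at its next safe point,
// and forEachP returns once every P has done so.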
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg
		// Mark the user stack as preemptible so that it may be scanned.
		// Otherwise, our attempt to force all P's to a safe point could
		// result in a deadlock as we attempt to preempt a goroutine
		// that's trying to preempt us (e.g. for a stack scan).
		casGToWaitingForGC(gp, _Grunning, reason)
		forEachPInternal(fn)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}

// forEachPInternal calls fn(p) for every P p when p reaches a GC safe point.
// It is the internal implementation of forEachP.
//
// The caller must hold worldsema and either must ensure that a GC is not
// running or otherwise be in a situation where a GC is guaranteed not to start
// (otherwise it could deadlock).
//
//go:systemstack
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and run it.
	//
	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(pp)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for _, p2 := range allp {
		s := p2.status

		// We need to be fine-grained about tracing here, since handoffp
		// can be called in many places.
		trace := traceAcquire()
		if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
			if trace.ok() {
				// It's important that we traceRelease before we call handoffp, which may also traceAcquire.
				trace.ProcSteal(p2, false)
				traceRelease(trace)
			}
			p2.syscalltick++
			handoffp(p2)
		} else if trace.ok() {
			traceRelease(trace)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//	    runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}
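
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows pp.
//
//go:yeswritebarrierrec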
func allocm(pp *p, fn func(), id int64) *m {
	allocmLock.rlock()

	// The caller owns pp, but we may borrow (i.e., acquirep) it. We must
	// disable preemption to ensure it is not stolen, which would make the
	// caller lose ownership.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		acquirep(pp) // temporarily borrow p for mallocs in this function
	}

	// Release the free M list. We need to do this somewhere and
	// this may free up a stack we can use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			// Wait for freeWait to indicate that freem's stack is unused.
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// Drop any remaining trace resources.
			// Ms can continue to emit events all the way until wait != freeMWait,
			// so it's only safe to call traceThreadDestroy at this point.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			// Free the stack if needed. For freeMRef, there is
			// nothing to do except drop freem from the sched.freem
			// list.
			if wait == freeMStack {
				// stackfree must be on the system stack, but allocm is
				// reachable off the system stack transitively from
				// startm.
				systemstack(func() {
					stackfree(freem.g0.stack)
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack.
	// Windows and Plan 9 will layout sched stack on OS stack.
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if pp == gp.m.p.ptr() {
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}
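
// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Popping from the stack with a plain
// compare-and-swap would have ABA races, so the stack is
// locked by exchanging the head pointer with a sentinel value,
// which acts as a simple spin lock usable even without an m.
// The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.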
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can also happen on Windows if a global ctor uses a
		// callback created by syscall.NewCallback.
		//
		// Can not throw, because scheduler is not initialized yet.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save and block signals before getting an M.
	// The signal handler may call needm itself,
	// and we must avoid a deadlock. Also, once g is installed,
	// any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// getExtraM is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp, last := getExtraM()

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = last

	// Store the original signal mask for use by minit.
	mp.sigmask = sigmask

	// Install TLS on some platforms (previously setg
	// would do this if necessary).
	osSetupTLS(mp)

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack.
	setg(mp.g0)
	sp := getcallersp()
	callbackUpdateSystemStack(mp, sp, signal)

	// Should mark we are already in Go now.
	// Otherwise, we will call needm again when we get a signal, before cgocallbackg1,
	// which means the extram list may be empty, that will cause a deadlock.
	mp.isExtraInC = false

	// Initialize this thread to use the m.
	asminit()
	minit()

	// Emit a trace event for this dead -> syscall transition,
	// but only if we're not in a signal handler.
	//
	// N.B. the tracer can run on a bare M just fine, we just have
	// to make sure to do this before setg(nil) and unminit.
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	sched.ngsys.Add(-1)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	mp.isExtraInSig = signal
}

// Acquire an extra m and bind it to the C thread when a pthread key has been created.
//
//go:nosplit
func needAndBindM() {
	needm(false)

	if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
		cgoBindM()
	}
}

// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := extraMWaiters.Swap(0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else if extraMLength.Load() == 0 {
		// Make sure there is at least one extra M.
		oneNewExtraM()
	}
}

// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as _Gidle. Change to _Gdead before
	// adding to allg where GC can see it. We use _Gdead to hide
	// this from tracebacks and stack scans since it isn't a
	// "real" goroutine until needm grabs it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.isextra = true
	// mark we are in C by default.
	mp.isExtraInC = true
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	sched.ngsys.Add(1)

	// Add m to the extra list.
	addExtraM(mp)
}
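
// dropm puts the current m back onto the extra list.
//
// 1. On systems without pthreads, like Windows,
// dropm is called when a cgo callback has completed.
// In this case, we never run a Go program on this C thread again,
// so the m is put back onto the extra list for use by a later callback.
//
// 2. On systems with pthreads,
// dropm is called while a non-Go thread is exiting.
// We allocate a pthread per-thread variable using pthread_key_create,
// to register a thread-exit-time destructor, and store the g into a
// thread-specific value associated with the pthread key when first
// returning back to C, so that the destructor invokes dropm while
// the non-Go thread is exiting. This is much faster since it avoids
// expensive signal-related syscalls.
//
// This may run with m.p == nil, so write barriers are not allowed.
//
//go:nowritebarrierrec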
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Emit a trace event for this syscall -> dead transition.
	//
	// N.B. the tracer can run on a bare M just fine, we just have
	// to make sure to do this before setg(nil) and unminit.
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// Return mp.curg to dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// Trash syscalltick so that it doesn't line up with mp.old.syscalltick anymore.
	//
	// In the new tracer, we model needm and dropm as a goroutine being created and
	// destroyed respectively. The m then might get reused with a different procid but
	// still with a reference to oldp, and still with the same syscalltick. The next
	// time a G is "created" in needm, it'll return and quietly reacquire its P from a
	// different m with a different procid, which will confuse the trace parser. By
	// trashing syscalltick, we ensure that it'll appear as if we lost the P to the
	// syscall and everything will work out.
	mp.syscalltick--

	// Reset trace state unconditionally. This goroutine is being 'destroyed'
	// from the perspective of the tracer.
	mp.curg.trace.reset()

	// Flush all the M's buffers. This is necessary because the M might
	// be used on a different thread with a different procid, so we have
	// to make sure we don't write into the same buffer.
	if traceEnabled() || traceShuttingDown() {
		// Acquire sched.lock across thread destruction. One of the invariants of the tracer
		// is that a thread cannot disappear from the tracer's view (trace.lock) without
		// it noticing, so it requires that sched.lock be held over traceThreadDestroy.
		//
		// This isn't strictly necessary in this case, because this thread never leaves
		// the runtime, but avoid it just to be safe.
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()

	setg(nil)

	// Clear g0 stack bounds to ensure that needm always refreshes the
	// bounds when reusing this M.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0

	putExtraM(mp)

	msigrestore(sigmask)
}

// cgoBindM stores the g0 of the current m into a thread-specific value.
//
// We allocate a pthread per-thread variable using pthread_key_create,
// to register a thread-exit-time destructor. Setting the thread-specific
// value of the pthread key enables the destructor, so that the
// pthread_key destructor will invoke dropm while the C thread is exiting.
//
// The saved g will be used in the pthread key destructor,
// since the g stored in the TLS by Go might be cleared on some platforms
// before the destructor is invoked, so we restore g from the stored
// value before dropm.
//
// We store g0 instead of m, to make the assembly code simpler,
// since we need to restore g0 in runtime.cgocallback.
//
// On systems without pthreads, like Windows, bindm shouldn't be used.
//
// NOTE: this always runs without a P, so nowritebarrierrec is required.
//
//go:nosplit
//go:nowritebarrierrec
func cgoBindM() {
	if GOOS == "windows" || GOOS == "plan9" {
		fatal("bindm in unexpected GOOS")
	}
	g := getg()
	if g.m.g0 != g {
		fatal("the current g is not g0")
	}
	if _cgo_bindm != nil {
		asmcgocall(_cgo_bindm, unsafe.Pointer(g))
	}
}

// A helper function for EnsureDropM.
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}

var (
	// Locking linked list of extra M's, via mp.schedlink. Must be accessed
	// only via lockextra/unlockextra.
	//
	// Can't be atomic.Pointer[m] because we use an invalid pointer as a
	// "locked" sentinel value. M's on this list remain visible to the GC
	// because their mp.curg is on allgs.
	extraM atomic.Uintptr
	// Number of M's in the extraM list.
	extraMLength atomic.Uint32
	// Number of waiters in lockextra.
	extraMWaiters atomic.Uint32

	// Number of extra M's in use by threads.
	extraMInUse atomic.Uint32
)

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extraM. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}

// Return an M from the extra M list. Returns last == true if the list becomes
// empty because of this call.
//
// Spins waiting for an extra M, so caller must ensure that the list always
// contains or will soon contain at least one M.
//
//go:nosplit
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}

// Returns an extra M back to the list. mp must be from getExtraM. Newly
// allocated M's should use addExtraM.
//
//go:nosplit
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}

// Adds a newly allocated M to the extra M list.
//
//go:nosplit
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}

var (
	// allocmLock is locked for read when creating new Ms in allocm and their
	// addition to allm. Thus acquiring this lock for write blocks the
	// creation of new Ms.
	allocmLock rwmutex

	// execLock serializes exec and clone to avoid bugs or unspecified
	// behaviour around exec'ing while creating/destroying threads.
	execLock rwmutex
)

// These errors are reported (via writeErrStr) by some OS-specific
// versions of newosproc and newosproc0.
const (
	failthreadcreate  = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)

// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
// start an OS thread.
var newmHandoff struct {
	lock mutex

	// newm points to a list of M structures that need new OS
	// threads. The list is linked through m.schedlink.
	newm muintptr

	// waiting indicates that wake needs to be notified when an m
	// is put on the list.
	waiting bool
	wake    note

	// haveTemplateThread indicates that the templateThread has
	// been started. This is not protected by lock. Use cas to set
	// to 1.
	haveTemplateThread uint32
}
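
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
//
//go:nowritebarrierrec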
func newm(fn func(), pp *p, id int64) {
	// allocm adds a new M to allm, but they do not start until created by
	// the OS in newm1 or the template thread.
	//
	// doAllThreadsSyscall requires that every M in allm will eventually
	// start and be signal-able, even with a STW.
	//
	// Disable preemption here until we start the thread to ensure that
	// newm is not preempted between allocm and starting the new thread,
	// ensuring that anything added to allm is guaranteed to eventually
	// start.
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We're on a locked M or a thread that may have been
		// started by C. The kernel state of this thread may
		// be strange (the user may have locked it for that
		// purpose). We don't want to clone that into another
		// thread. Instead, ask a known-good thread to create
		// the thread for us.
		//
		// This is disabled on Plan 9. See golang.org/issue/22227.
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		// The M has not started yet, but the template thread does not
		// participate in STW, so it will always process queued Ms and
		// it is safe to releasem.
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}

func newm1(mp *m) {
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock() // Prevent process clone.
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock() // Prevent process clone.
	newosproc(mp)
	execLock.runlock()
}

// startTemplateThread starts the template thread if it is not already
// running.
//
// The calling thread must itself be in a known-good state.
func startTemplateThread() {
	if GOARCH == "wasm" { // no threads on wasm yet
		return
	}

	// Disable preemption to guarantee that the template thread will be
	// created before a park once haveTemplateThread is set.
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}

// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
//
// Many programs never need this, so templateThread is started lazily
// when we first enter a state that might lead to running on a thread
// in an unknown state.
//
// templateThread runs on an M without a P, so it must not have write
// barriers.
//
//go:nowritebarrierrec
func templateThread() {
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}

func mspinning() {
	// startm's caller incremented nmspinning. Set the new M's spinning.
	getg().m.spinning = true
}
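
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide a
// P. startm will set m.spinning in the newly started M.
//
// Callers passing a non-nil P must call from a non-preemptible context. See
// comment on acquirem below.
//
// Argument lockheld indicates whether the caller already acquired the
// scheduler lock. Callers holding the lock when making the call must pass
// true. The lock might be temporarily dropped, but will be reacquired before
// returning.
//
// Must not have write barriers because this may be called without a P.
//
//go:nowritebarrierrec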
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption.
	//
	// Every owned P must have an owner that will eventually stop it in the
	// event of a GC stop request. startm takes transient ownership of a P
	// (either from argument or pidleget below) and transfers ownership to
	// a started M, which will be responsible for performing the stop.
	//
	// Preemption must be disabled during this transient ownership,
	// otherwise the P this is running on may enter GC stop while still
	// holding the transient P, leaving that P in limbo and deadlocking the
	// STW.
	//
	// Callers passing a non-nil P must already be in non-preemptible
	// context, otherwise such preemption could occur on function entry to
	// startm. Callers passing a nil P may be preemptible, so we must
	// disable preemption before acquiring a P from pidleget below.
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// TODO(prattmic): All remaining calls to this function
			// with _p_ == nil could be cleaned up to find a P
			// before calling startm.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No M is available, we must drop sched.lock and call newm.
		// However, we already own a P to assign to the M.
		//
		// Once sched.lock is released, another G (e.g., in a syscall),
		// could find no idle P while checkdead finds a runnable G but
		// no running M's because this new M hasn't started yet, thus
		// throwing in an apparent deadlock.
		// This apparent deadlock is possible when startm is called
		// from sysmon, which doesn't count as a running M.
		//
		// Avoid this situation by pre-allocating the ID for the new M,
		// thus marking it as 'running' before we drop sched.lock. This
		// new M will eventually run the scheduler to execute any
		// queued G's.
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning in the new M.
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock)
		}
		// Ownership transfer of pp committed by start in newm.
		// Preemption is now safe.
		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning, so set m.spinning in the new M.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)
	// Ownership transfer of pp committed by wakeup. Preemption is now
	// safe.
	releasem(mp)
}
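
// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec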
func handoffp(pp *p) {
	// handoffp must start an M in any situation where
	// findrunnable would return a G to run on pp.

	// if it has local work, start it straight away
	if !runqempty(pp) || sched.runqsize != 0 {
		startm(pp, false, false)
		return
	}
	// if there's trace work to do, start it straight away
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// if it has GC work, start it straight away
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
		startm(pp, false, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// The scheduler lock cannot be held when calling wakeNetPoller below
	// because wakeNetPoller may call wakep which may call startm.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
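
// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
// Must be called with a P.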
func wakep() {
	// Be conservative about spinning threads, only start one if none exist
	// already.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until ownership of pp transfers to the next M in
	// startm. Otherwise preemption here would leave pp stuck waiting to
	// enter _Pgcstop.
	//
	// See preemption comment on acquirem in startm for more details.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}
	// Since we always have a P, the race in the "No M is available"
	// comment in startm doesn't apply during the small window between the
	// unlock here and lock in startm. A checkdead in between will always
	// see at least one running M (ours).
	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}

// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	gp := getg()

	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Schedule another M to run this p.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	mPark()
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}

// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// directly handoff current P to the locked m
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	notewakeup(&mp.park)
	stopm()
}

// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		gp.m.spinning = false
		// OK to just drop nmspinning here,
		// startTheWorld will unpark threads as necessary.
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}

// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.
//
//go:yeswritebarrierrec
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure that gp has had its stack written out to the goroutine
		// profile, exactly as it was when the goroutine profiler first
		// stopped the world.
		tryRecordGoroutineProfile(gp, nil, osyield)
	}

	// Assign gp.m before entering _Grunning so running Gs have an
	// M.
	mp.curg = gp
	gp.m = mp
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		mp.p.ptr().schedtick++
	}

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	gogo(&gp.sched)
}
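
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.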
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

	// The conditions here and in handoffp must agree: if
	// findrunnable would return a G to run, handoffp must start
	// an M.

top:
	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// now and pollUntil are saved for work stealing later,
	// which may steal timers. It's important that between now
	// and then, nothing blocks, so these numbers remain mostly
	// relevant.
	now, pollUntil, _ := pp.timers.check(0)

	// Try to schedule the trace reader.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// Try to schedule a GC worker.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Check the global runnable queue once in a while to ensure fairness.
	// Otherwise two goroutines can completely occupy the local runqueue
	// by constantly respawning each other.
	if pp.schedtick%61 == 0 && sched.runqsize > 0 {
		lock(&sched.lock)
		gp := globrunqget(pp, 1)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake up the finalizer G.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}
	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// local runq
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// global runq
	if sched.runqsize != 0 {
		lock(&sched.lock)
		gp := globrunqget(pp, 0)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Poll network.
	// This netpoll is only an optimization before we resort to stealing.
	// We can safely skip it if there are no waiters or a thread is blocked
	// in netpoll already. If there is any kind of logical race with that
	// blocked thread (e.g. it has already returned from netpoll, but does
	// not set lastpoll yet), this thread will do blocking netpoll below
	// anyway.
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() { // non-blocking
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Spinning Ms: steal work from other Ps.
	//
	// Limit the number of spinning Ms to half the number of busy Ps.
	// This is necessary to prevent excessive CPU consumption when
	// GOMAXPROCS>>1 but the program parallelism is low.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			// Successfully stole.
			return gp, inheritTime, false
		}
		if newWork {
			// There may be new timer or GC work; restart to
			// discover.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer to wait for.
			pollUntil = w
		}
	}

	// We have nothing to do.
	//
	// If we're in the GC mark phase, can safely scan and blacken objects,
	// and have work to do, run idle-time marking rather than give up the P.
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// wasm only:
	// If a callback returned and no other goroutine is awake,
	// then wake event handler goroutine which pauses execution
	// until a callback was triggered.
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// Before we drop our P, make a snapshot of the allp slice,
	// which can change underfoot once we no longer block
	// safe-points. We don't need to snapshot the contents because
	// everything up to cap(allp) is immutable.
	allpSnapshot := allp
	// Also snapshot masks. Value changes are OK, but we can't allow
	// len to change out from under us.
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// return P and block
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if sched.runqsize != 0 {
		gp := globrunqget(pp, 0)
		unlock(&sched.lock)
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// See "Delicate dance" comment below.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findrunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)

	// Delicate dance: thread transitions from spinning to non-spinning
	// state, potentially concurrently with submission of new work. We must
	// drop nmspinning first and then check all sources again (with
	// #StoreLoad memory barrier in between). If we do it the other way
	// around, another thread can submit work after we've checked all
	// sources but before we drop nmspinning; as a result nobody will
	// unpark a thread to run the work.
	//
	// If we discover new work below, we need to restore m.spinning as a
	// signal for resetspinning to unpark a new worker thread (because
	// there can be more than one starving goroutine).
	//
	// However, if after discovering new work we also observe no idle Ps
	// (either here or in resetspinning), we have a problem. We may be
	// racing with a non-spinning M in the block above, having found no
	// work and preparing to release its P and park. Allowing that P to go
	// idle will result in loss of work conservation (idle P while there is
	// runnable work).
	//
	// We use sched.needspinning to synchronize with non-spinning Ms going
	// idle. If needspinning is set when they are about to drop their P,
	// they abort the drop and instead become a new spinning M on our
	// behalf. If we are not racing and the system is truly fully loaded
	// then no spinning threads are required, and the next thread to
	// naturally become spinning will clear the flag.
	wasSpinning := mp.spinning
	if mp.spinning {
		mp.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("findrunnable: negative nmspinning")
		}

		// Note that for correctness, only the last M transitioning from
		// spinning to non-spinning must perform these rechecks to
		// ensure no missed work. However, the runtime has some cases
		// of transient increments of nmspinning that are decremented
		// without going through this path, so we must be conservative
		// and perform the check on all spinning Ms.
		//
		// See https://go.dev/issue/43997.

		// Check global and P runqueues again.

		lock(&sched.lock)
		if sched.runqsize != 0 {
			pp, _ := pidlegetSpinning(0)
			if pp != nil {
				gp := globrunqget(pp, 0)
				if gp == nil {
					throw("global runq empty with non-zero runqsize")
				}
				unlock(&sched.lock)
				acquirep(pp)
				mp.becomeSpinning()
				return gp, false, false
			}
		}
		unlock(&sched.lock)

		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()
			goto top
		}

		// Check for idle-priority GC work again.
		pp, gp := checkIdleGCNoP()
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()

			// Run the idle worker.
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}

		// Finally, check for timer creation or expiry concurrently with
		// transitioning from spinning to non-spinning.
		//
		// Note that we cannot use checkTimers here because it calls
		// adjusttimers which may need to allocate memory, and that isn't
		// allowed when we don't have an active P.
		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
	}

	// Poll network until next timer.
	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
		sched.pollUntil.Store(pollUntil)
		if mp.p != 0 {
			throw("findrunnable: netpoll with p")
		}
		if mp.spinning {
			throw("findrunnable: netpoll with spinning")
		}
		delay := int64(-1)
		if pollUntil != 0 {
			if now == 0 {
				now = nanotime()
			}
			delay = pollUntil - now
			if delay < 0 {
				delay = 0
			}
		}
		if faketime != 0 {
			// When using fake time, just poll.
			delay = 0
		}
		list, delta := netpoll(delay) // block until new work is available
		// Refresh now again, after potentially blocking.
		now = nanotime()
		sched.pollUntil.Store(0)
		sched.lastpoll.Store(now)
		if faketime != 0 && list.empty() {
			// Using fake time and nothing is ready; stop M.
			// When all M's stop, checkdead will call timejump.
			stopm()
			goto top
		}
		lock(&sched.lock)
		pp, _ := pidleget(now)
		unlock(&sched.lock)
		if pp == nil {
			injectglist(&list)
			netpollAdjustWaiters(delta)
		} else {
			acquirep(pp)
			if !list.empty() {
				gp := list.pop()
				injectglist(&list)
				netpollAdjustWaiters(delta)
				trace := traceAcquire()
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.ok() {
					trace.GoUnpark(gp, 0)
					traceRelease(trace)
				}
				return gp, false, false
			}
			if wasSpinning {
				mp.becomeSpinning()
			}
			goto top
		}
	} else if pollUntil != 0 && netpollinited() {
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
			netpollBreak()
		}
	}
	stopm()
	goto top
}
3625
3626
3627
3628
3629
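// pollWork reports whether there is non-background work this P could
// be doing. This is a fairly lightweight check to be used for
// background work loops, like idle GC. It checks a subset of the
// conditions checked by the actual scheduler.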
3630 func pollWork() bool {
3631 if sched.runqsize != 0 {
3632 return true
3633 }
3634 p := getg().m.p.ptr()
3635 if !runqempty(p) {
3636 return true
3637 }
3638 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3639 if list, delta := netpoll(0); !list.empty() {
3640 injectglist(&list)
3641 netpollAdjustWaiters(delta)
3642 return true
3643 }
3644 }
3645 return false
3646 }
3647
3648
3649
3650
3651
3652
3653
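// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed time
// or the current time if now was passed as 0.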
3654 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3655 pp := getg().m.p.ptr()
3656
3657 ranTimer := false
3658
3659 const stealTries = 4
3660 for i := 0; i < stealTries; i++ {
3661 stealTimersOrRunNextG := i == stealTries-1
3662
3663 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3664 if sched.gcwaiting.Load() {
3665
3666 return nil, false, now, pollUntil, true
3667 }
3668 p2 := allp[enum.position()]
3669 if pp == p2 {
3670 continue
3671 }
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3687 tnow, w, ran := p2.timers.check(now)
3688 now = tnow
3689 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3690 pollUntil = w
3691 }
3692 if ran {
3693
3694
3695
3696
3697
3698
3699
3700
3701 if gp, inheritTime := runqget(pp); gp != nil {
3702 return gp, inheritTime, now, pollUntil, ranTimer
3703 }
3704 ranTimer = true
3705 }
3706 }
3707
3708
3709 if !idlepMask.read(enum.position()) {
3710 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3711 return gp, false, now, pollUntil, ranTimer
3712 }
3713 }
3714 }
3715 }
3716
3717
3718
3719
3720 return nil, false, now, pollUntil, ranTimer
3721 }
3722
3723
3724
3725
3726
3727
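// Check all Ps for a runnable G to steal.
//
// On entry we have no P. If a G is available to steal and a P is available,
// the P is returned which the caller should acquire and attempt to steal the
// work to.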
3728 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3729 for id, p2 := range allpSnapshot {
3730 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3731 lock(&sched.lock)
3732 pp, _ := pidlegetSpinning(0)
3733 if pp == nil {
3734
3735 unlock(&sched.lock)
3736 return nil
3737 }
3738 unlock(&sched.lock)
3739 return pp
3740 }
3741 }
3742
3743
3744 return nil
3745 }
3746
3747
3748
3749
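// Check all Ps for a timer expiring sooner than pollUntil.
//
// Returns updated pollUntil value.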
3750 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3751 for id, p2 := range allpSnapshot {
3752 if timerpMaskSnapshot.read(uint32(id)) {
3753 w := p2.timers.wakeTime()
3754 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3755 pollUntil = w
3756 }
3757 }
3758 }
3759
3760 return pollUntil
3761 }
3762
3763
3764
3765
3766
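// Check for idle-priority GC work without a P.
//
// If some GC work, a P, and a worker G are all available, the P and G will
// be returned. The returned P has not been wired yet.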
3767 func checkIdleGCNoP() (*p, *g) {
3768
3769
3770
3771
3772
3773
3774 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3775 return nil, nil
3776 }
3777 if !gcMarkWorkAvailable(nil) {
3778 return nil, nil
3779 }
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798 lock(&sched.lock)
3799 pp, now := pidlegetSpinning(0)
3800 if pp == nil {
3801 unlock(&sched.lock)
3802 return nil, nil
3803 }
3804
3805
3806 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3807 pidleput(pp, now)
3808 unlock(&sched.lock)
3809 return nil, nil
3810 }
3811
3812 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3813 if node == nil {
3814 pidleput(pp, now)
3815 unlock(&sched.lock)
3816 gcController.removeIdleMarkWorker()
3817 return nil, nil
3818 }
3819
3820 unlock(&sched.lock)
3821
3822 return pp, node.gp.ptr()
3823 }
3824
3825
3826
3827
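// wakeNetPoller wakes up the thread sleeping in the network poller if it
// isn't going to wake up before the when argument; or it wakes an idle P
// to service timers and the network poller if there isn't one already.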
3828 func wakeNetPoller(when int64) {
3829 if sched.lastpoll.Load() == 0 {
3830
3831
3832
3833
3834 pollerPollUntil := sched.pollUntil.Load()
3835 if pollerPollUntil == 0 || pollerPollUntil > when {
3836 netpollBreak()
3837 }
3838 } else {
3839
3840
3841 if GOOS != "plan9" {
3842 wakep()
3843 }
3844 }
3845 }
3846
3847 func resetspinning() {
3848 gp := getg()
3849 if !gp.m.spinning {
3850 throw("resetspinning: not a spinning m")
3851 }
3852 gp.m.spinning = false
3853 nmspinning := sched.nmspinning.Add(-1)
3854 if nmspinning < 0 {
3855 throw("findrunnable: negative nmspinning")
3856 }
3857
3858
3859
3860 wakep()
3861 }
3862
3863
3864
3865
3866
3867
3868
3869
3870
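// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to npidle M's are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.
// Can run concurrently with GC.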
3871 func injectglist(glist *gList) {
3872 if glist.empty() {
3873 return
3874 }
3875 trace := traceAcquire()
3876 if trace.ok() {
3877 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3878 trace.GoUnpark(gp, 0)
3879 }
3880 traceRelease(trace)
3881 }
3882
// Mark all the goroutines as runnable before we put them
// on the run queues.
3885 head := glist.head.ptr()
3886 var tail *g
3887 qsize := 0
3888 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3889 tail = gp
3890 qsize++
3891 casgstatus(gp, _Gwaiting, _Grunnable)
3892 }
3893
3894
3895 var q gQueue
3896 q.head.set(head)
3897 q.tail.set(tail)
3898 *glist = gList{}
3899
3900 startIdle := func(n int) {
3901 for i := 0; i < n; i++ {
3902 mp := acquirem()
3903 lock(&sched.lock)
3904
3905 pp, _ := pidlegetSpinning(0)
3906 if pp == nil {
3907 unlock(&sched.lock)
3908 releasem(mp)
3909 break
3910 }
3911
3912 startm(pp, false, true)
3913 unlock(&sched.lock)
3914 releasem(mp)
3915 }
3916 }
3917
3918 pp := getg().m.p.ptr()
3919 if pp == nil {
3920 lock(&sched.lock)
3921 globrunqputbatch(&q, int32(qsize))
3922 unlock(&sched.lock)
3923 startIdle(qsize)
3924 return
3925 }
3926
3927 npidle := int(sched.npidle.Load())
3928 var (
3929 globq gQueue
3930 n int
3931 )
3932 for n = 0; n < npidle && !q.empty(); n++ {
3933 g := q.pop()
3934 globq.pushBack(g)
3935 }
3936 if n > 0 {
3937 lock(&sched.lock)
3938 globrunqputbatch(&globq, int32(n))
3939 unlock(&sched.lock)
3940 startIdle(n)
3941 qsize -= n
3942 }
3943
3944 if !q.empty() {
3945 runqputbatch(pp, &q, qsize)
3946 }
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961 wakep()
3962 }
3963
3964
3965
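// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.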
3966 func schedule() {
3967 mp := getg().m
3968
3969 if mp.locks != 0 {
3970 throw("schedule: holding locks")
3971 }
3972
3973 if mp.lockedg != 0 {
3974 stoplockedm()
3975 execute(mp.lockedg.ptr(), false)
3976 }
3977
3978
3979
3980 if mp.incgo {
3981 throw("schedule: in cgo")
3982 }
3983
3984 top:
3985 pp := mp.p.ptr()
3986 pp.preempt = false
3987
3988
3989
3990
3991 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3992 throw("schedule: spinning with local work")
3993 }
3994
3995 gp, inheritTime, tryWakeP := findRunnable()
3996
3997 if debug.dontfreezetheworld > 0 && freezing.Load() {
3998
3999
4000
4001
4002
4003
4004
4005 lock(&deadlock)
4006 lock(&deadlock)
4007 }
4008
// This thread is going to run a goroutine and is not spinning anymore,
// so if it was marked as spinning we need to reset it now and potentially
// start a new spinning M.
4012 if mp.spinning {
4013 resetspinning()
4014 }
4015
4016 if sched.disable.user && !schedEnabled(gp) {
4017
4018
4019
4020 lock(&sched.lock)
4021 if schedEnabled(gp) {
4022
4023
4024 unlock(&sched.lock)
4025 } else {
4026 sched.disable.runnable.pushBack(gp)
4027 sched.disable.n++
4028 unlock(&sched.lock)
4029 goto top
4030 }
4031 }
4032
// If about to schedule a not-normal goroutine (a GCworker or tracereader),
// wake a P if there is one.
4035 if tryWakeP {
4036 wakep()
4037 }
4038 if gp.lockedm != 0 {
4039
4040
4041 startlockedm(gp)
4042 goto top
4043 }
4044
4045 execute(gp, inheritTime)
4046 }
4047
4048
4049
4050
4051
4052
4053
4054
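// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.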
4055 func dropg() {
4056 gp := getg()
4057
4058 setMNoWB(&gp.m.curg.m, nil)
4059 setGNoWB(&gp.m.curg, nil)
4060 }
4061
4062 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4063 unlock((*mutex)(lock))
4064 return true
4065 }
4066
4067
4068 func park_m(gp *g) {
4069 mp := getg().m
4070
4071 trace := traceAcquire()
4072
4073 if trace.ok() {
4074
4075
4076
4077 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4078 }
4079
4080
4081 casgstatus(gp, _Grunning, _Gwaiting)
4082 if trace.ok() {
4083 traceRelease(trace)
4084 }
4085
4086 dropg()
4087
4088 if fn := mp.waitunlockf; fn != nil {
4089 ok := fn(gp, mp.waitlock)
4090 mp.waitunlockf = nil
4091 mp.waitlock = nil
4092 if !ok {
4093 trace := traceAcquire()
4094 casgstatus(gp, _Gwaiting, _Grunnable)
4095 if trace.ok() {
4096 trace.GoUnpark(gp, 2)
4097 traceRelease(trace)
4098 }
4099 execute(gp, true)
4100 }
4101 }
4102 schedule()
4103 }
4104
4105 func goschedImpl(gp *g, preempted bool) {
4106 trace := traceAcquire()
4107 status := readgstatus(gp)
4108 if status&^_Gscan != _Grunning {
4109 dumpgstatus(gp)
4110 throw("bad g status")
4111 }
4112 if trace.ok() {
4113
4114
4115
4116 if preempted {
4117 trace.GoPreempt()
4118 } else {
4119 trace.GoSched()
4120 }
4121 }
4122 casgstatus(gp, _Grunning, _Grunnable)
4123 if trace.ok() {
4124 traceRelease(trace)
4125 }
4126
4127 dropg()
4128 lock(&sched.lock)
4129 globrunqput(gp)
4130 unlock(&sched.lock)
4131
4132 if mainStarted {
4133 wakep()
4134 }
4135
4136 schedule()
4137 }
4138
4139
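// Gosched continuation on g0.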
4140 func gosched_m(gp *g) {
4141 goschedImpl(gp, false)
4142 }
4143
4144
4145 func goschedguarded_m(gp *g) {
4146 if !canPreemptM(gp.m) {
4147 gogo(&gp.sched)
4148 }
4149 goschedImpl(gp, false)
4150 }
4151
4152 func gopreempt_m(gp *g) {
4153 goschedImpl(gp, true)
4154 }
4155
4156
4157
4158
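// preemptPark parks gp and puts it in _Gpreempted.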
4159 func preemptPark(gp *g) {
4160 status := readgstatus(gp)
4161 if status&^_Gscan != _Grunning {
4162 dumpgstatus(gp)
4163 throw("bad g status")
4164 }
4165
4166 if gp.asyncSafePoint {
4167
4168
4169
4170 f := findfunc(gp.sched.pc)
4171 if !f.valid() {
4172 throw("preempt at unknown pc")
4173 }
4174 if f.flag&abi.FuncFlagSPWrite != 0 {
4175 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4176 throw("preempt SPWRITE")
4177 }
4178 }
4179
4180
4181
4182
4183
4184
4185
4186 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4187 dropg()
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204 trace := traceAcquire()
4205 if trace.ok() {
4206 trace.GoPark(traceBlockPreempted, 0)
4207 }
4208 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4209 if trace.ok() {
4210 traceRelease(trace)
4211 }
4212 schedule()
4213 }
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
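// goyield is like Gosched, but it:
//   - emits a GoPreempt trace event instead of a GoSched trace event
//   - puts the current G on the runq of the current P instead of the globrunq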
4229 func goyield() {
4230 checkTimeouts()
4231 mcall(goyield_m)
4232 }
4233
4234 func goyield_m(gp *g) {
4235 trace := traceAcquire()
4236 pp := gp.m.p.ptr()
4237 if trace.ok() {
4238
4239
4240
4241 trace.GoPreempt()
4242 }
4243 casgstatus(gp, _Grunning, _Grunnable)
4244 if trace.ok() {
4245 traceRelease(trace)
4246 }
4247 dropg()
4248 runqput(pp, gp, false)
4249 schedule()
4250 }
4251
4252
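// Finishes execution of the current goroutine.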
4253 func goexit1() {
4254 if raceenabled {
4255 racegoend()
4256 }
4257 trace := traceAcquire()
4258 if trace.ok() {
4259 trace.GoEnd()
4260 traceRelease(trace)
4261 }
4262 mcall(goexit0)
4263 }
4264
4265
4266 func goexit0(gp *g) {
4267 gdestroy(gp)
4268 schedule()
4269 }
4270
4271 func gdestroy(gp *g) {
4272 mp := getg().m
4273 pp := mp.p.ptr()
4274
4275 casgstatus(gp, _Grunning, _Gdead)
4276 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4277 if isSystemGoroutine(gp, false) {
4278 sched.ngsys.Add(-1)
4279 }
4280 gp.m = nil
4281 locked := gp.lockedm != 0
4282 gp.lockedm = 0
4283 mp.lockedg = 0
4284 gp.preemptStop = false
4285 gp.paniconfault = false
4286 gp._defer = nil
4287 gp._panic = nil
4288 gp.writebuf = nil
4289 gp.waitreason = waitReasonZero
4290 gp.param = nil
4291 gp.labels = nil
4292 gp.timer = nil
4293
4294 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4295
4296
4297
4298 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4299 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4300 gcController.bgScanCredit.Add(scanCredit)
4301 gp.gcAssistBytes = 0
4302 }
4303
4304 dropg()
4305
4306 if GOARCH == "wasm" {
4307 gfput(pp, gp)
4308 return
4309 }
4310
4311 if locked && mp.lockedInt != 0 {
4312 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4313 throw("exited a goroutine internally locked to the OS thread")
4314 }
4315 gfput(pp, gp)
4316 if locked {
4317
4318
4319
4320
4321
4322
4323 if GOOS != "plan9" {
4324 gogo(&mp.g0.sched)
4325 } else {
4326
4327
4328 mp.lockedExt = 0
4329 }
4330 }
4331 }
4332
4333
4334
4335
4336
4337
4338
4339
4340
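// save updates getg().sched to refer to pc and sp so that a following
// gogo will restore pc/sp.
//
// save must not have write barriers because invoking a write barrier
// can clobber getg().sched.
//
//go:nosplit
//go:nowritebarrierrec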
4341 func save(pc, sp, bp uintptr) {
4342 gp := getg()
4343
4344 if gp == gp.m.g0 || gp == gp.m.gsignal {
4345
4346
4347
4348
4349
4350 throw("save on system g not allowed")
4351 }
4352
4353 gp.sched.pc = pc
4354 gp.sched.sp = sp
4355 gp.sched.lr = 0
4356 gp.sched.ret = 0
4357 gp.sched.bp = bp
4358
4359
4360
4361 if gp.sched.ctxt != nil {
4362 badctxt()
4363 }
4364 }
4365
// The goroutine gp is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the save must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
// Nothing entersyscall calls can split the stack either.
// We cannot safely move the stack during an active call to syscall,
// because we do not know which of the uintptr arguments are
// really pointers (back into the stack).
// In practice, this means that we make the fast path run through
// entersyscall doing no-split things, and the slow path has to use
// systemstack to run bigger things on the system stack.
//
// reentersyscall is the entry point used by cgo callbacks, where
// explicitly saved SP and PC are restored.
//
//go:nosplit
4390 func reentersyscall(pc, sp, bp uintptr) {
4391 trace := traceAcquire()
4392 gp := getg()
4393
4394
4395
4396 gp.m.locks++
4397
4398
4399
4400
4401
4402 gp.stackguard0 = stackPreempt
4403 gp.throwsplit = true
4404
4405
4406 save(pc, sp, bp)
4407 gp.syscallsp = sp
4408 gp.syscallpc = pc
4409 gp.syscallbp = bp
4410 casgstatus(gp, _Grunning, _Gsyscall)
4411 if staticLockRanking {
4412
4413
4414 save(pc, sp, bp)
4415 }
4416 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4417 systemstack(func() {
4418 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4419 throw("entersyscall")
4420 })
4421 }
4422 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4423 systemstack(func() {
4424 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4425 throw("entersyscall")
4426 })
4427 }
4428
4429 if trace.ok() {
4430 systemstack(func() {
4431 trace.GoSysCall()
4432 traceRelease(trace)
4433 })
4434
4435
4436
4437 save(pc, sp, bp)
4438 }
4439
4440 if sched.sysmonwait.Load() {
4441 systemstack(entersyscall_sysmon)
4442 save(pc, sp, bp)
4443 }
4444
4445 if gp.m.p.ptr().runSafePointFn != 0 {
4446
4447 systemstack(runSafePointFn)
4448 save(pc, sp, bp)
4449 }
4450
4451 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4452 pp := gp.m.p.ptr()
4453 pp.m = 0
4454 gp.m.oldp.set(pp)
4455 gp.m.p = 0
4456 atomic.Store(&pp.status, _Psyscall)
4457 if sched.gcwaiting.Load() {
4458 systemstack(entersyscall_gcwait)
4459 save(pc, sp, bp)
4460 }
4461
4462 gp.m.locks--
4463 }
4464
// Standard syscall entry used by the go syscall library and normal cgo calls.
//
// This is exported via linkname to assembly in the syscall package and x/sys.
//
//go:nosplit
//go:linkname entersyscall
4479 func entersyscall() {
4480
4481
4482
4483
4484 fp := getcallerfp()
4485 reentersyscall(getcallerpc(), getcallersp(), fp)
4486 }
4487
4488 func entersyscall_sysmon() {
4489 lock(&sched.lock)
4490 if sched.sysmonwait.Load() {
4491 sched.sysmonwait.Store(false)
4492 notewakeup(&sched.sysmonnote)
4493 }
4494 unlock(&sched.lock)
4495 }
4496
4497 func entersyscall_gcwait() {
4498 gp := getg()
4499 pp := gp.m.oldp.ptr()
4500
4501 lock(&sched.lock)
4502 trace := traceAcquire()
4503 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4504 if trace.ok() {
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514 trace.ProcSteal(pp, true)
4515 traceRelease(trace)
4516 }
4517 pp.gcStopTime = nanotime()
4518 pp.syscalltick++
4519 if sched.stopwait--; sched.stopwait == 0 {
4520 notewakeup(&sched.stopnote)
4521 }
4522 } else if trace.ok() {
4523 traceRelease(trace)
4524 }
4525 unlock(&sched.lock)
4526 }
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
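// The same as entersyscall(), but with a hint that the syscall is blocking.
//
//go:nosplit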
4540 func entersyscallblock() {
4541 gp := getg()
4542
4543 gp.m.locks++
4544 gp.throwsplit = true
4545 gp.stackguard0 = stackPreempt
4546 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4547 gp.m.p.ptr().syscalltick++
4548
4549
4550 pc := getcallerpc()
4551 sp := getcallersp()
4552 bp := getcallerfp()
4553 save(pc, sp, bp)
4554 gp.syscallsp = gp.sched.sp
4555 gp.syscallpc = gp.sched.pc
4556 gp.syscallbp = gp.sched.bp
4557 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4558 sp1 := sp
4559 sp2 := gp.sched.sp
4560 sp3 := gp.syscallsp
4561 systemstack(func() {
4562 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4563 throw("entersyscallblock")
4564 })
4565 }
4566 casgstatus(gp, _Grunning, _Gsyscall)
4567 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4568 systemstack(func() {
4569 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4570 throw("entersyscallblock")
4571 })
4572 }
4573 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4574 systemstack(func() {
4575 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4576 throw("entersyscallblock")
4577 })
4578 }
4579
4580 systemstack(entersyscallblock_handoff)
4581
4582
4583 save(getcallerpc(), getcallersp(), getcallerfp())
4584
4585 gp.m.locks--
4586 }
4587
4588 func entersyscallblock_handoff() {
4589 trace := traceAcquire()
4590 if trace.ok() {
4591 trace.GoSysCall()
4592 traceRelease(trace)
4593 }
4594 handoffp(releasep())
4595 }
4596
// The goroutine gp exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
//
// Write barriers are not allowed because our P may have been stolen.
//
// This is exported via linkname to assembly in the syscall package.
//
//go:nosplit
//go:nowritebarrierrec
//go:linkname exitsyscall
4617 func exitsyscall() {
4618 gp := getg()
4619
4620 gp.m.locks++
4621 if getcallersp() > gp.syscallsp {
4622 throw("exitsyscall: syscall frame is no longer valid")
4623 }
4624
4625 gp.waitsince = 0
4626 oldp := gp.m.oldp.ptr()
4627 gp.m.oldp = 0
4628 if exitsyscallfast(oldp) {
4629
4630
4631 if goroutineProfile.active {
4632
4633
4634
4635 systemstack(func() {
4636 tryRecordGoroutineProfileWB(gp)
4637 })
4638 }
4639 trace := traceAcquire()
4640 if trace.ok() {
4641 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4642 systemstack(func() {
4643
4644
4645
4646
4647 trace.GoSysExit(lostP)
4648 if lostP {
4649
4650
4651
4652
4653 trace.GoStart()
4654 }
4655 })
4656 }
4657
4658 gp.m.p.ptr().syscalltick++
4659
4660 casgstatus(gp, _Gsyscall, _Grunning)
4661 if trace.ok() {
4662 traceRelease(trace)
4663 }
4664
4665
4666
4667 gp.syscallsp = 0
4668 gp.m.locks--
4669 if gp.preempt {
4670
4671 gp.stackguard0 = stackPreempt
4672 } else {
4673
4674 gp.stackguard0 = gp.stack.lo + stackGuard
4675 }
4676 gp.throwsplit = false
4677
4678 if sched.disable.user && !schedEnabled(gp) {
4679
4680 Gosched()
4681 }
4682
4683 return
4684 }
4685
4686 gp.m.locks--
4687
4688
4689 mcall(exitsyscall0)
4690
4691
4692
4693
4694
4695
4696
4697 gp.syscallsp = 0
4698 gp.m.p.ptr().syscalltick++
4699 gp.throwsplit = false
4700 }
4701
4702
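// exitsyscallfast attempts to reacquire a P quickly: first the P this M
// ran on before the syscall (oldp), then any idle P. It reports whether
// a P was acquired.
//
//go:nosplit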
4703 func exitsyscallfast(oldp *p) bool {
4704
4705 if sched.stopwait == freezeStopWait {
4706 return false
4707 }
4708
4709
4710 trace := traceAcquire()
4711 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4712
4713 wirep(oldp)
4714 exitsyscallfast_reacquired(trace)
4715 if trace.ok() {
4716 traceRelease(trace)
4717 }
4718 return true
4719 }
4720 if trace.ok() {
4721 traceRelease(trace)
4722 }
4723
4724
4725 if sched.pidle != 0 {
4726 var ok bool
4727 systemstack(func() {
4728 ok = exitsyscallfast_pidle()
4729 })
4730 if ok {
4731 return true
4732 }
4733 }
4734 return false
4735 }
4736
4737
4738
4739
4740
4741
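// exitsyscallfast_reacquired is the exitsyscall path on which this G
// has successfully reacquired the P it was running on before the
// syscall.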
4742 func exitsyscallfast_reacquired(trace traceLocker) {
4743 gp := getg()
4744 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4745 if trace.ok() {
4746
4747
4748
4749 systemstack(func() {
4750
4751
4752 trace.ProcSteal(gp.m.p.ptr(), true)
4753 trace.ProcStart()
4754 })
4755 }
4756 gp.m.p.ptr().syscalltick++
4757 }
4758 }
4759
4760 func exitsyscallfast_pidle() bool {
4761 lock(&sched.lock)
4762 pp, _ := pidleget(0)
4763 if pp != nil && sched.sysmonwait.Load() {
4764 sched.sysmonwait.Store(false)
4765 notewakeup(&sched.sysmonnote)
4766 }
4767 unlock(&sched.lock)
4768 if pp != nil {
4769 acquirep(pp)
4770 return true
4771 }
4772 return false
4773 }
4774
4775
4776
4777
4778
4779
4780
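// exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
//
// Called via mcall, so gp is the calling g from this M.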
4781 func exitsyscall0(gp *g) {
4782 var trace traceLocker
4783 traceExitingSyscall()
4784 trace = traceAcquire()
4785 casgstatus(gp, _Gsyscall, _Grunnable)
4786 traceExitedSyscall()
4787 if trace.ok() {
4788
4789
4790
4791
4792 trace.GoSysExit(true)
4793 traceRelease(trace)
4794 }
4795 dropg()
4796 lock(&sched.lock)
4797 var pp *p
4798 if schedEnabled(gp) {
4799 pp, _ = pidleget(0)
4800 }
4801 var locked bool
4802 if pp == nil {
4803 globrunqput(gp)
4804
4805
4806
4807
4808
4809
4810 locked = gp.lockedm != 0
4811 } else if sched.sysmonwait.Load() {
4812 sched.sysmonwait.Store(false)
4813 notewakeup(&sched.sysmonnote)
4814 }
4815 unlock(&sched.lock)
4816 if pp != nil {
4817 acquirep(pp)
4818 execute(gp, false)
4819 }
4820 if locked {
4821
4822
4823
4824
4825 stoplockedm()
4826 execute(gp, false)
4827 }
4828 stopm()
4829 schedule()
4830 }
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
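// Called from syscall package before fork.
//
//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
//go:nosplit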
4845 func syscall_runtime_BeforeFork() {
4846 gp := getg().m.curg
4847
4848
4849
4850
4851 gp.m.locks++
4852 sigsave(&gp.m.sigmask)
4853 sigblock(false)
4854
4855
4856
4857
4858
4859 gp.stackguard0 = stackFork
4860 }
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
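// Called from syscall package after fork in parent.
//
//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
//go:nosplit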
4875 func syscall_runtime_AfterFork() {
4876 gp := getg().m.curg
4877
4878
4879 gp.stackguard0 = gp.stack.lo + stackGuard
4880
4881 msigrestore(gp.m.sigmask)
4882
4883 gp.m.locks--
4884 }
4885
4886
4887
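// inForkedChild is true while manipulating signals in the child process.
// This is used to avoid calling libc functions in case we are using vfork.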
4888 var inForkedChild bool
4889
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
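// Called from syscall package after fork in child.
// It resets non-sigignored signals to the default handler, and
// restores the signal mask in preparation for the exec.
//
// Because this might be called during a vfork, and therefore may be
// temporarily sharing address space with the parent process, this must
// not change any global variables or calling into C code that may do so.
//
//go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
//go:nosplit
//go:nowritebarrierrec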
4910 func syscall_runtime_AfterForkInChild() {
4911
4912
4913
4914
4915 inForkedChild = true
4916
4917 clearSignalHandlers()
4918
4919
4920
4921 msigrestore(getg().m.sigmask)
4922
4923 inForkedChild = false
4924 }
4925
4926
4927
4928
4929 var pendingPreemptSignals atomic.Int32
4930
4931
4932
4933
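// Called from syscall package before Exec.
//
//go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec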
4934 func syscall_runtime_BeforeExec() {
4935
4936 execLock.lock()
4937
4938
4939
4940 if GOOS == "darwin" || GOOS == "ios" {
4941 for pendingPreemptSignals.Load() > 0 {
4942 osyield()
4943 }
4944 }
4945 }
4946
4947
4948
4949
4950 func syscall_runtime_AfterExec() {
4951 execLock.unlock()
4952 }
4953
4954
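// Allocate a new g, with a stack big enough for stacksize bytes.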
4955 func malg(stacksize int32) *g {
4956 newg := new(g)
4957 if stacksize >= 0 {
4958 stacksize = round2(stackSystem + stacksize)
4959 systemstack(func() {
4960 newg.stack = stackalloc(uint32(stacksize))
4961 })
4962 newg.stackguard0 = newg.stack.lo + stackGuard
4963 newg.stackguard1 = ^uintptr(0)
4964
4965
4966 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4967 }
4968 return newg
4969 }
4970
4971
4972
4973
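// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.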
4974 func newproc(fn *funcval) {
4975 gp := getg()
4976 pc := getcallerpc()
4977 systemstack(func() {
4978 newg := newproc1(fn, gp, pc, false, waitReasonZero)
4979
4980 pp := getg().m.p.ptr()
4981 runqput(pp, newg, true)
4982
4983 if mainStarted {
4984 wakep()
4985 }
4986 })
4987 }
4988
4989
4990
4991
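// Create a new g in state _Grunnable (or _Gwaiting if parked is true),
// starting at fn. callerpc is the address of the go statement that
// created this. The caller is responsible for adding the new g to the
// scheduler. If parked is true, waitreason must be non-zero.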
4992 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
4993 if fn == nil {
4994 fatal("go of nil func value")
4995 }
4996
4997 mp := acquirem()
4998 pp := mp.p.ptr()
4999 newg := gfget(pp)
5000 if newg == nil {
5001 newg = malg(stackMin)
5002 casgstatus(newg, _Gidle, _Gdead)
5003 allgadd(newg)
5004 }
5005 if newg.stack.hi == 0 {
5006 throw("newproc1: newg missing stack")
5007 }
5008
5009 if readgstatus(newg) != _Gdead {
5010 throw("newproc1: new g is not Gdead")
5011 }
5012
5013 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5014 totalSize = alignUp(totalSize, sys.StackAlign)
5015 sp := newg.stack.hi - totalSize
5016 if usesLR {
5017
5018 *(*uintptr)(unsafe.Pointer(sp)) = 0
5019 prepGoExitFrame(sp)
5020 }
5021 if GOARCH == "arm64" {
5022
5023 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5024 }
5025
5026 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5027 newg.sched.sp = sp
5028 newg.stktopsp = sp
5029 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5030 newg.sched.g = guintptr(unsafe.Pointer(newg))
5031 gostartcallfn(&newg.sched, fn)
5032 newg.parentGoid = callergp.goid
5033 newg.gopc = callerpc
5034 newg.ancestors = saveAncestors(callergp)
5035 newg.startpc = fn.fn
5036 if isSystemGoroutine(newg, false) {
5037 sched.ngsys.Add(1)
5038 } else {
5039
5040 if mp.curg != nil {
5041 newg.labels = mp.curg.labels
5042 }
5043 if goroutineProfile.active {
5044
5045
5046
5047
5048
5049 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5050 }
5051 }
5052
5053 newg.trackingSeq = uint8(cheaprand())
5054 if newg.trackingSeq%gTrackingPeriod == 0 {
5055 newg.tracking = true
5056 }
5057 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5058
5059
5060 trace := traceAcquire()
5061 var status uint32 = _Grunnable
5062 if parked {
5063 status = _Gwaiting
5064 newg.waitreason = waitreason
5065 }
5066 casgstatus(newg, _Gdead, status)
5067 if pp.goidcache == pp.goidcacheend {
5068
5069
5070
5071 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5072 pp.goidcache -= _GoidCacheBatch - 1
5073 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5074 }
5075 newg.goid = pp.goidcache
5076 pp.goidcache++
5077 newg.trace.reset()
5078 if trace.ok() {
5079 trace.GoCreate(newg, newg.startpc, parked)
5080 traceRelease(trace)
5081 }
5082
5083
5084 if raceenabled {
5085 newg.racectx = racegostart(callerpc)
5086 newg.raceignore = 0
5087 if newg.labels != nil {
5088
5089
5090 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5091 }
5092 }
5093 releasem(mp)
5094
5095 return newg
5096 }
5097
5098
5099
5100
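// saveAncestors copies previous ancestors of the given caller g and
// includes info for the current caller into a new set of tracebacks for
// a g being created.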
5101 func saveAncestors(callergp *g) *[]ancestorInfo {
5102
5103 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5104 return nil
5105 }
5106 var callerAncestors []ancestorInfo
5107 if callergp.ancestors != nil {
5108 callerAncestors = *callergp.ancestors
5109 }
5110 n := int32(len(callerAncestors)) + 1
5111 if n > debug.tracebackancestors {
5112 n = debug.tracebackancestors
5113 }
5114 ancestors := make([]ancestorInfo, n)
5115 copy(ancestors[1:], callerAncestors)
5116
5117 var pcs [tracebackInnerFrames]uintptr
5118 npcs := gcallers(callergp, 0, pcs[:])
5119 ipcs := make([]uintptr, npcs)
5120 copy(ipcs, pcs[:])
5121 ancestors[0] = ancestorInfo{
5122 pcs: ipcs,
5123 goid: callergp.goid,
5124 gopc: callergp.gopc,
5125 }
5126
5127 ancestorsp := new([]ancestorInfo)
5128 *ancestorsp = ancestors
5129 return ancestorsp
5130 }
5131
5132
5133
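// Put on gfree list.
// If local list is too long, transfer a batch to the global list.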
5134 func gfput(pp *p, gp *g) {
5135 if readgstatus(gp) != _Gdead {
5136 throw("gfput: bad status (not Gdead)")
5137 }
5138
5139 stksize := gp.stack.hi - gp.stack.lo
5140
5141 if stksize != uintptr(startingStackSize) {
5142
5143 stackfree(gp.stack)
5144 gp.stack.lo = 0
5145 gp.stack.hi = 0
5146 gp.stackguard0 = 0
5147 }
5148
5149 pp.gFree.push(gp)
5150 pp.gFree.n++
5151 if pp.gFree.n >= 64 {
5152 var (
5153 inc int32
5154 stackQ gQueue
5155 noStackQ gQueue
5156 )
5157 for pp.gFree.n >= 32 {
5158 gp := pp.gFree.pop()
5159 pp.gFree.n--
5160 if gp.stack.lo == 0 {
5161 noStackQ.push(gp)
5162 } else {
5163 stackQ.push(gp)
5164 }
5165 inc++
5166 }
5167 lock(&sched.gFree.lock)
5168 sched.gFree.noStack.pushAll(noStackQ)
5169 sched.gFree.stack.pushAll(stackQ)
5170 sched.gFree.n += inc
5171 unlock(&sched.gFree.lock)
5172 }
5173 }
5174
5175
5176
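// Get from gfree list.
// If local list is empty, grab a batch from global list.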
5177 func gfget(pp *p) *g {
5178 retry:
5179 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5180 lock(&sched.gFree.lock)
5181
5182 for pp.gFree.n < 32 {
5183
5184 gp := sched.gFree.stack.pop()
5185 if gp == nil {
5186 gp = sched.gFree.noStack.pop()
5187 if gp == nil {
5188 break
5189 }
5190 }
5191 sched.gFree.n--
5192 pp.gFree.push(gp)
5193 pp.gFree.n++
5194 }
5195 unlock(&sched.gFree.lock)
5196 goto retry
5197 }
5198 gp := pp.gFree.pop()
5199 if gp == nil {
5200 return nil
5201 }
5202 pp.gFree.n--
5203 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5204
5205
5206
5207 systemstack(func() {
5208 stackfree(gp.stack)
5209 gp.stack.lo = 0
5210 gp.stack.hi = 0
5211 gp.stackguard0 = 0
5212 })
5213 }
5214 if gp.stack.lo == 0 {
5215
5216 systemstack(func() {
5217 gp.stack = stackalloc(startingStackSize)
5218 })
5219 gp.stackguard0 = gp.stack.lo + stackGuard
5220 } else {
5221 if raceenabled {
5222 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5223 }
5224 if msanenabled {
5225 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5226 }
5227 if asanenabled {
5228 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5229 }
5230 }
5231 return gp
5232 }
5233
5234
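// Purge all cached G's from gfree list to the global list.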
5235 func gfpurge(pp *p) {
5236 var (
5237 inc int32
5238 stackQ gQueue
5239 noStackQ gQueue
5240 )
5241 for !pp.gFree.empty() {
5242 gp := pp.gFree.pop()
5243 pp.gFree.n--
5244 if gp.stack.lo == 0 {
5245 noStackQ.push(gp)
5246 } else {
5247 stackQ.push(gp)
5248 }
5249 inc++
5250 }
5251 lock(&sched.gFree.lock)
5252 sched.gFree.noStack.pushAll(noStackQ)
5253 sched.gFree.stack.pushAll(stackQ)
5254 sched.gFree.n += inc
5255 unlock(&sched.gFree.lock)
5256 }
5257
5258
5259 func Breakpoint() {
5260 breakpoint()
5261 }
5262
5263
5264
5265
5266
5267
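// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
//
//go:nosplit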
5268 func dolockOSThread() {
5269 if GOARCH == "wasm" {
5270 return
5271 }
5272 gp := getg()
5273 gp.m.lockedg.set(gp)
5274 gp.lockedm.set(gp.m)
5275 }
5276
// LockOSThread wires the calling goroutine to its current operating system thread.
// The calling goroutine will always execute in that thread,
// and no other goroutine will execute in it,
// until the calling goroutine has made as many calls to
// UnlockOSThread as to LockOSThread.
// If the calling goroutine exits without unlocking the thread,
// the thread will be terminated.
//
// All init functions are run on the startup thread. Calling LockOSThread
// from an init function will cause the main function to be invoked on
// that thread.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.
//
//go:nosplit
5293 func LockOSThread() {
5294 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5295
5296
5297
5298 startTemplateThread()
5299 }
5300 gp := getg()
5301 gp.m.lockedExt++
5302 if gp.m.lockedExt == 0 {
5303 gp.m.lockedExt--
5304 panic("LockOSThread nesting overflow")
5305 }
5306 dolockOSThread()
5307 }
5308
5309
5310 func lockOSThread() {
5311 getg().m.lockedInt++
5312 dolockOSThread()
5313 }
5314
5315
5316
5317
5318
5319
5320 func dounlockOSThread() {
5321 if GOARCH == "wasm" {
5322 return
5323 }
5324 gp := getg()
5325 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5326 return
5327 }
5328 gp.m.lockedg = 0
5329 gp.lockedm = 0
5330 }
5331
// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the
// calling goroutine to zero, it unwires the calling goroutine from
// its fixed operating system thread.
// If there are no active LockOSThread calls, this is a no-op.
//
// Before calling UnlockOSThread, the caller must ensure that the OS
// thread is suitable for running other goroutines. If the caller made
// any permanent changes to the state of the thread that would affect
// other goroutines, it should not call this function and thus leave
// the goroutine locked to the OS thread until the goroutine (and
// hence the thread) exits.
//
//go:nosplit
5346 func UnlockOSThread() {
5347 gp := getg()
5348 if gp.m.lockedExt == 0 {
5349 return
5350 }
5351 gp.m.lockedExt--
5352 dounlockOSThread()
5353 }
5354
5355
5356 func unlockOSThread() {
5357 gp := getg()
5358 if gp.m.lockedInt == 0 {
5359 systemstack(badunlockosthread)
5360 }
5361 gp.m.lockedInt--
5362 dounlockOSThread()
5363 }
5364
5365 func badunlockosthread() {
5366 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5367 }
5368
5369 func gcount() int32 {
5370 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5371 for _, pp := range allp {
5372 n -= pp.gFree.n
5373 }
5374
5375
5376
5377 if n < 1 {
5378 n = 1
5379 }
5380 return n
5381 }
5382
5383 func mcount() int32 {
5384 return int32(sched.mnext - sched.nmfreed)
5385 }
5386
5387 var prof struct {
5388 signalLock atomic.Uint32
5389
5390
5391
5392 hz atomic.Int32
5393 }
5394
5395 func _System() { _System() }
5396 func _ExternalCode() { _ExternalCode() }
5397 func _LostExternalCode() { _LostExternalCode() }
5398 func _GC() { _GC() }
5399 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5400 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5401 func _VDSO() { _VDSO() }
5402
5403
5404
5405
5406
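// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.
//
//go:nowritebarrierrec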
5407 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5408 if prof.hz.Load() == 0 {
5409 return
5410 }
5411
5412
5413
5414
5415 if mp != nil && mp.profilehz == 0 {
5416 return
5417 }
5418
5419
5420
5421
5422
5423
5424
5425 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5426 if f := findfunc(pc); f.valid() {
5427 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5428 cpuprof.lostAtomic++
5429 return
5430 }
5431 }
5432 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5433
5434
5435
5436 cpuprof.lostAtomic++
5437 return
5438 }
5439 }
5440
5441
5442
5443
5444
5445
5446
5447 getg().m.mallocing++
5448
5449 var u unwinder
5450 var stk [maxCPUProfStack]uintptr
5451 n := 0
5452 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5453 cgoOff := 0
5454
5455
5456
5457
5458
5459 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5460 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5461 cgoOff++
5462 }
5463 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5464 mp.cgoCallers[0] = 0
5465 }
5466
5467
5468 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5469 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5470
5471
5472 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5473 } else if mp != nil && mp.vdsoSP != 0 {
5474
5475
5476 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5477 } else {
5478 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5479 }
5480 n += tracebackPCs(&u, 0, stk[n:])
5481
5482 if n <= 0 {
5483
5484
5485 n = 2
5486 if inVDSOPage(pc) {
5487 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5488 } else if pc > firstmoduledata.etext {
5489
5490 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5491 }
5492 stk[0] = pc
5493 if mp.preemptoff != "" {
5494 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5495 } else {
5496 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5497 }
5498 }
5499
5500 if prof.hz.Load() != 0 {
5501
5502
5503
5504 var tagPtr *unsafe.Pointer
5505 if gp != nil && gp.m != nil && gp.m.curg != nil {
5506 tagPtr = &gp.m.curg.labels
5507 }
5508 cpuprof.add(tagPtr, stk[:n])
5509
5510 gprof := gp
5511 var mp *m
5512 var pp *p
5513 if gp != nil && gp.m != nil {
5514 if gp.m.curg != nil {
5515 gprof = gp.m.curg
5516 }
5517 mp = gp.m
5518 pp = gp.m.p.ptr()
5519 }
5520 traceCPUSample(gprof, mp, pp, stk[:n])
5521 }
5522 getg().m.mallocing--
5523 }
5524
5525
5526
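// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, setcpuprofilerate turns off CPU profiling.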
5527 func setcpuprofilerate(hz int32) {
5528
5529 if hz < 0 {
5530 hz = 0
5531 }
5532
5533
5534
5535 gp := getg()
5536 gp.m.locks++
5537
5538
5539
5540
5541 setThreadCPUProfiler(0)
5542
5543 for !prof.signalLock.CompareAndSwap(0, 1) {
5544 osyield()
5545 }
5546 if prof.hz.Load() != hz {
5547 setProcessCPUProfiler(hz)
5548 prof.hz.Store(hz)
5549 }
5550 prof.signalLock.Store(0)
5551
5552 lock(&sched.lock)
5553 sched.profilehz = hz
5554 unlock(&sched.lock)
5555
5556 if hz != 0 {
5557 setThreadCPUProfiler(hz)
5558 }
5559
5560 gp.m.locks--
5561 }
5562
5563
5564
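// init initializes pp, which may be a freshly allocated p or a
// previously destroyed p, and transitions it to status _Pgcstop.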
5565 func (pp *p) init(id int32) {
5566 pp.id = id
5567 pp.status = _Pgcstop
5568 pp.sudogcache = pp.sudogbuf[:0]
5569 pp.deferpool = pp.deferpoolbuf[:0]
5570 pp.wbBuf.reset()
5571 if pp.mcache == nil {
5572 if id == 0 {
5573 if mcache0 == nil {
5574 throw("missing mcache?")
5575 }
5576
5577
5578 pp.mcache = mcache0
5579 } else {
5580 pp.mcache = allocmcache()
5581 }
5582 }
5583 if raceenabled && pp.raceprocctx == 0 {
5584 if id == 0 {
5585 pp.raceprocctx = raceprocctx0
5586 raceprocctx0 = 0
5587 } else {
5588 pp.raceprocctx = raceproccreate()
5589 }
5590 }
5591 lockInit(&pp.timers.mu, lockRankTimers)
5592
5593
5594
5595 timerpMask.set(id)
5596
5597
5598 idlepMask.clear(id)
5599 }
5600
5601
5602
5603
5604
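// destroy releases all of the resources associated with pp and
// transitions it to status _Pdead.
//
// sched.lock must be held and the world must be stopped.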
5605 func (pp *p) destroy() {
5606 assertLockHeld(&sched.lock)
5607 assertWorldStopped()
5608
5609
5610 for pp.runqhead != pp.runqtail {
5611
5612 pp.runqtail--
5613 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5614
5615 globrunqputhead(gp)
5616 }
5617 if pp.runnext != 0 {
5618 globrunqputhead(pp.runnext.ptr())
5619 pp.runnext = 0
5620 }
5621
5622
5623 getg().m.p.ptr().timers.take(&pp.timers)
5624
5625
5626 if gcphase != _GCoff {
5627 wbBufFlush1(pp)
5628 pp.gcw.dispose()
5629 }
5630 for i := range pp.sudogbuf {
5631 pp.sudogbuf[i] = nil
5632 }
5633 pp.sudogcache = pp.sudogbuf[:0]
5634 pp.pinnerCache = nil
5635 for j := range pp.deferpoolbuf {
5636 pp.deferpoolbuf[j] = nil
5637 }
5638 pp.deferpool = pp.deferpoolbuf[:0]
5639 systemstack(func() {
5640 for i := 0; i < pp.mspancache.len; i++ {
5641
5642 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5643 }
5644 pp.mspancache.len = 0
5645 lock(&mheap_.lock)
5646 pp.pcache.flush(&mheap_.pages)
5647 unlock(&mheap_.lock)
5648 })
5649 freemcache(pp.mcache)
5650 pp.mcache = nil
5651 gfpurge(pp)
5652 if raceenabled {
5653 if pp.timers.raceCtx != 0 {
5654
5655
5656
5657
5658
5659 mp := getg().m
5660 phold := mp.p.ptr()
5661 mp.p.set(pp)
5662
5663 racectxend(pp.timers.raceCtx)
5664 pp.timers.raceCtx = 0
5665
5666 mp.p.set(phold)
5667 }
5668 raceprocdestroy(pp.raceprocctx)
5669 pp.raceprocctx = 0
5670 }
5671 pp.gcAssistTime = 0
5672 pp.status = _Pdead
5673 }
5674
5675
5676
5677
5678
5679
5680
5681
5682
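// Change number of processors.
//
// sched.lock must be held, and the world must be stopped.
//
// gcworkbufs must not be being modified by either the GC or the write
// barrier code, so the GC must not be running if the number of Ps
// actually changes.
//
// Returns list of Ps with local work, they need to be scheduled by the
// caller.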
5683 func procresize(nprocs int32) *p {
5684 assertLockHeld(&sched.lock)
5685 assertWorldStopped()
5686
5687 old := gomaxprocs
5688 if old < 0 || nprocs <= 0 {
5689 throw("procresize: invalid arg")
5690 }
5691 trace := traceAcquire()
5692 if trace.ok() {
5693 trace.Gomaxprocs(nprocs)
5694 traceRelease(trace)
5695 }
5696
5697
5698 now := nanotime()
5699 if sched.procresizetime != 0 {
5700 sched.totaltime += int64(old) * (now - sched.procresizetime)
5701 }
5702 sched.procresizetime = now
5703
5704 maskWords := (nprocs + 31) / 32
5705
5706
5707 if nprocs > int32(len(allp)) {
5708
5709
5710 lock(&allpLock)
5711 if nprocs <= int32(cap(allp)) {
5712 allp = allp[:nprocs]
5713 } else {
5714 nallp := make([]*p, nprocs)
5715
5716
5717 copy(nallp, allp[:cap(allp)])
5718 allp = nallp
5719 }
5720
5721 if maskWords <= int32(cap(idlepMask)) {
5722 idlepMask = idlepMask[:maskWords]
5723 timerpMask = timerpMask[:maskWords]
5724 } else {
5725 nidlepMask := make([]uint32, maskWords)
5726
5727 copy(nidlepMask, idlepMask)
5728 idlepMask = nidlepMask
5729
5730 ntimerpMask := make([]uint32, maskWords)
5731 copy(ntimerpMask, timerpMask)
5732 timerpMask = ntimerpMask
5733 }
5734 unlock(&allpLock)
5735 }
5736
5737
5738 for i := old; i < nprocs; i++ {
5739 pp := allp[i]
5740 if pp == nil {
5741 pp = new(p)
5742 }
5743 pp.init(i)
5744 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5745 }
5746
5747 gp := getg()
5748 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5749
5750 gp.m.p.ptr().status = _Prunning
5751 gp.m.p.ptr().mcache.prepareForSweep()
5752 } else {
5753
5754
5755
5756
5757
5758 if gp.m.p != 0 {
5759 trace := traceAcquire()
5760 if trace.ok() {
5761
5762
5763
5764 trace.GoSched()
5765 trace.ProcStop(gp.m.p.ptr())
5766 traceRelease(trace)
5767 }
5768 gp.m.p.ptr().m = 0
5769 }
5770 gp.m.p = 0
5771 pp := allp[0]
5772 pp.m = 0
5773 pp.status = _Pidle
5774 acquirep(pp)
5775 trace := traceAcquire()
5776 if trace.ok() {
5777 trace.GoStart()
5778 traceRelease(trace)
5779 }
5780 }
5781
5782
5783 mcache0 = nil
5784
5785
5786 for i := nprocs; i < old; i++ {
5787 pp := allp[i]
5788 pp.destroy()
5789
5790 }
5791
5792
5793 if int32(len(allp)) != nprocs {
5794 lock(&allpLock)
5795 allp = allp[:nprocs]
5796 idlepMask = idlepMask[:maskWords]
5797 timerpMask = timerpMask[:maskWords]
5798 unlock(&allpLock)
5799 }
5800
5801 var runnablePs *p
5802 for i := nprocs - 1; i >= 0; i-- {
5803 pp := allp[i]
5804 if gp.m.p.ptr() == pp {
5805 continue
5806 }
5807 pp.status = _Pidle
5808 if runqempty(pp) {
5809 pidleput(pp, now)
5810 } else {
5811 pp.m.set(mget())
5812 pp.link.set(runnablePs)
5813 runnablePs = pp
5814 }
5815 }
5816 stealOrder.reset(uint32(nprocs))
5817 var int32p *int32 = &gomaxprocs
5818 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5819 if old != nprocs {
5820
5821 gcCPULimiter.resetCapacity(now, nprocs)
5822 }
5823 return runnablePs
5824 }
5825
5826
5827
5828
5829
5830
5831
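// Associate p and the current m.
//
// This function is allowed to have write barriers even if the caller
// isn't because it immediately acquires pp.
//
//go:yeswritebarrierrec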
5832 func acquirep(pp *p) {
5833
5834 wirep(pp)
5835
5836
5837
5838
5839
5840 pp.mcache.prepareForSweep()
5841
5842 trace := traceAcquire()
5843 if trace.ok() {
5844 trace.ProcStart()
5845 traceRelease(trace)
5846 }
5847 }
5848
5849
5850
5851
5852
5853
5854
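// wirep is the first step of acquirep, which actually associates the
// current M to pp. This is broken out so we can disallow write
// barriers for this part, since we don't yet have a P.
//
//go:nowritebarrierrec
//go:nosplit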
5855 func wirep(pp *p) {
5856 gp := getg()
5857
5858 if gp.m.p != 0 {
5859
5860
5861 systemstack(func() {
5862 throw("wirep: already in go")
5863 })
5864 }
5865 if pp.m != 0 || pp.status != _Pidle {
5866
5867
5868 systemstack(func() {
5869 id := int64(0)
5870 if pp.m != 0 {
5871 id = pp.m.ptr().id
5872 }
5873 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5874 throw("wirep: invalid p state")
5875 })
5876 }
5877 gp.m.p.set(pp)
5878 pp.m.set(gp.m)
5879 pp.status = _Prunning
5880 }
5881
5882
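// Disassociate p and the current m.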
5883 func releasep() *p {
5884 trace := traceAcquire()
5885 if trace.ok() {
5886 trace.ProcStop(getg().m.p.ptr())
5887 traceRelease(trace)
5888 }
5889 return releasepNoTrace()
5890 }
5891
5892
5893 func releasepNoTrace() *p {
5894 gp := getg()
5895
5896 if gp.m.p == 0 {
5897 throw("releasep: invalid arg")
5898 }
5899 pp := gp.m.p.ptr()
5900 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5901 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5902 throw("releasep: invalid p state")
5903 }
5904 gp.m.p = 0
5905 pp.m = 0
5906 pp.status = _Pidle
5907 return pp
5908 }
5909
5910 func incidlelocked(v int32) {
5911 lock(&sched.lock)
5912 sched.nmidlelocked += v
5913 if v > 0 {
5914 checkdead()
5915 }
5916 unlock(&sched.lock)
5917 }
5918
5919
5920
5921
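// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.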
5922 func checkdead() {
5923 assertLockHeld(&sched.lock)
5924
5925
5926
5927
5928 if islibrary || isarchive {
5929 return
5930 }
5931
5932
5933
5934
5935
5936 if panicking.Load() > 0 {
5937 return
5938 }
5939
5940
5941
5942
5943
5944 var run0 int32
5945 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
5946 run0 = 1
5947 }
5948
5949 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5950 if run > run0 {
5951 return
5952 }
5953 if run < 0 {
5954 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5955 unlock(&sched.lock)
5956 throw("checkdead: inconsistent counts")
5957 }
5958
5959 grunning := 0
5960 forEachG(func(gp *g) {
5961 if isSystemGoroutine(gp, false) {
5962 return
5963 }
5964 s := readgstatus(gp)
5965 switch s &^ _Gscan {
5966 case _Gwaiting,
5967 _Gpreempted:
5968 grunning++
5969 case _Grunnable,
5970 _Grunning,
5971 _Gsyscall:
5972 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5973 unlock(&sched.lock)
5974 throw("checkdead: runnable g")
5975 }
5976 })
5977 if grunning == 0 {
5978 unlock(&sched.lock)
5979 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
5980 }
5981
5982
5983 if faketime != 0 {
5984 if when := timeSleepUntil(); when < maxWhen {
5985 faketime = when
5986
5987
5988 pp, _ := pidleget(faketime)
5989 if pp == nil {
5990
5991
5992 unlock(&sched.lock)
5993 throw("checkdead: no p for timer")
5994 }
5995 mp := mget()
5996 if mp == nil {
5997
5998
5999 unlock(&sched.lock)
6000 throw("checkdead: no m for timer")
6001 }
6002
6003
6004
6005 sched.nmspinning.Add(1)
6006 mp.spinning = true
6007 mp.nextp.set(pp)
6008 notewakeup(&mp.park)
6009 return
6010 }
6011 }
6012
6013
6014 for _, pp := range allp {
6015 if len(pp.timers.heap) > 0 {
6016 return
6017 }
6018 }
6019
6020 unlock(&sched.lock)
6021 fatal("all goroutines are asleep - deadlock!")
6022 }
6023
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If we go this long without a garbage collection, one
// is forced to run.
//
// This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 2 * 60 * 1e9

// needSysmonWorkaround is true if the workaround for
// golang.org/issue/42515 is needed on NetBSD.
var needSysmonWorkaround bool = false

// haveSysmon indicates whether there is sysmon thread support.
6038 const haveSysmon = GOARCH != "wasm"
6039
6040
6041
6042
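// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec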
6043 func sysmon() {
6044 lock(&sched.lock)
6045 sched.nmsys++
6046 checkdead()
6047 unlock(&sched.lock)
6048
6049 lasttrace := int64(0)
6050 idle := 0
6051 delay := uint32(0)
6052
6053 for {
6054 if idle == 0 {
6055 delay = 20
6056 } else if idle > 50 {
6057 delay *= 2
6058 }
6059 if delay > 10*1000 {
6060 delay = 10 * 1000
6061 }
6062 usleep(delay)
6063
6064
6065
6066
6067
6068
6069
6070
6071
6072
6073
6074
6075
6076
6077
6078
6079 now := nanotime()
6080 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6081 lock(&sched.lock)
6082 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6083 syscallWake := false
6084 next := timeSleepUntil()
6085 if next > now {
6086 sched.sysmonwait.Store(true)
6087 unlock(&sched.lock)
6088
6089
6090 sleep := forcegcperiod / 2
6091 if next-now < sleep {
6092 sleep = next - now
6093 }
6094 shouldRelax := sleep >= osRelaxMinNS
6095 if shouldRelax {
6096 osRelax(true)
6097 }
6098 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6099 if shouldRelax {
6100 osRelax(false)
6101 }
6102 lock(&sched.lock)
6103 sched.sysmonwait.Store(false)
6104 noteclear(&sched.sysmonnote)
6105 }
6106 if syscallWake {
6107 idle = 0
6108 delay = 20
6109 }
6110 }
6111 unlock(&sched.lock)
6112 }
6113
6114 lock(&sched.sysmonlock)
6115
6116
6117 now = nanotime()
6118
6119
6120 if *cgo_yield != nil {
6121 asmcgocall(*cgo_yield, nil)
6122 }
6123
6124 lastpoll := sched.lastpoll.Load()
6125 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6126 sched.lastpoll.CompareAndSwap(lastpoll, now)
6127 list, delta := netpoll(0)
6128 if !list.empty() {
6129
6130
6131
6132
6133
6134
6135
6136 incidlelocked(-1)
6137 injectglist(&list)
6138 incidlelocked(1)
6139 netpollAdjustWaiters(delta)
6140 }
6141 }
6142 if GOOS == "netbsd" && needSysmonWorkaround {
6143
6144
6145
6146
6147
6148
6149
6150
6151
6152
6153
6154
6155
6156
6157
6158 if next := timeSleepUntil(); next < now {
6159 startm(nil, false, false)
6160 }
6161 }
6162 if scavenger.sysmonWake.Load() != 0 {
6163
6164 scavenger.wake()
6165 }
6166
6167
6168 if retake(now) != 0 {
6169 idle = 0
6170 } else {
6171 idle++
6172 }
6173
6174 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6175 lock(&forcegc.lock)
6176 forcegc.idle.Store(false)
6177 var list gList
6178 list.push(forcegc.g)
6179 injectglist(&list)
6180 unlock(&forcegc.lock)
6181 }
6182 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6183 lasttrace = now
6184 schedtrace(debug.scheddetail > 0)
6185 }
6186 unlock(&sched.sysmonlock)
6187 }
6188 }
6189
6190 type sysmontick struct {
6191 schedtick uint32
6192 syscalltick uint32
6193 schedwhen int64
6194 syscallwhen int64
6195 }
6196
6197
6198
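// forcePreemptNS is the time slice given to a G before it is
// preempted.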
6199 const forcePreemptNS = 10 * 1000 * 1000
6200
6201 func retake(now int64) uint32 {
6202 n := 0
6203
6204
6205 lock(&allpLock)
6206
6207
6208
6209 for i := 0; i < len(allp); i++ {
6210 pp := allp[i]
6211 if pp == nil {
6212
6213
6214 continue
6215 }
6216 pd := &pp.sysmontick
6217 s := pp.status
6218 sysretake := false
6219 if s == _Prunning || s == _Psyscall {
6220
6221
6222
6223
6224 t := int64(pp.schedtick)
6225 if int64(pd.schedtick) != t {
6226 pd.schedtick = uint32(t)
6227 pd.schedwhen = now
6228 } else if pd.schedwhen+forcePreemptNS <= now {
6229 preemptone(pp)
6230
6231
6232 sysretake = true
6233 }
6234 }
6235 if s == _Psyscall {
6236
6237 t := int64(pp.syscalltick)
6238 if !sysretake && int64(pd.syscalltick) != t {
6239 pd.syscalltick = uint32(t)
6240 pd.syscallwhen = now
6241 continue
6242 }
6243
6244
6245
6246 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6247 continue
6248 }
6249
6250 unlock(&allpLock)
6251
6252
6253
6254
6255 incidlelocked(-1)
6256 trace := traceAcquire()
6257 if atomic.Cas(&pp.status, s, _Pidle) {
6258 if trace.ok() {
6259 trace.ProcSteal(pp, false)
6260 traceRelease(trace)
6261 }
6262 n++
6263 pp.syscalltick++
6264 handoffp(pp)
6265 } else if trace.ok() {
6266 traceRelease(trace)
6267 }
6268 incidlelocked(1)
6269 lock(&allpLock)
6270 }
6271 }
6272 unlock(&allpLock)
6273 return uint32(n)
6274 }
6275
6276
6277
6278
6279
6280
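// Tell all goroutines that they have been preempted and they should stop.
// This function is purely best-effort. It can fail to inform a goroutine
// if a processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.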
6281 func preemptall() bool {
6282 res := false
6283 for _, pp := range allp {
6284 if pp.status != _Prunning {
6285 continue
6286 }
6287 if preemptone(pp) {
6288 res = true
6289 }
6290 }
6291 return res
6292 }
6293
6294
6295
6296
6297
6298
6299
6300
6301
6302
6303
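// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform
// the goroutine. It can inform the wrong goroutine. Even if it informs
// the correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.
// Returns true if preemption request was issued.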
6304 func preemptone(pp *p) bool {
6305 mp := pp.m.ptr()
6306 if mp == nil || mp == getg().m {
6307 return false
6308 }
6309 gp := mp.curg
6310 if gp == nil || gp == mp.g0 {
6311 return false
6312 }
6313
6314 gp.preempt = true
6315
6316
6317
6318
6319
6320 gp.stackguard0 = stackPreempt
6321
6322
6323 if preemptMSupported && debug.asyncpreemptoff == 0 {
6324 pp.preempt = true
6325 preemptM(mp)
6326 }
6327
6328 return true
6329 }
6330
6331 var starttime int64
6332
6333 func schedtrace(detailed bool) {
6334 now := nanotime()
6335 if starttime == 0 {
6336 starttime = now
6337 }
6338
6339 lock(&sched.lock)
6340 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6341 if detailed {
6342 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6343 }
6344
6345
6346
6347 for i, pp := range allp {
6348 mp := pp.m.ptr()
6349 h := atomic.Load(&pp.runqhead)
6350 t := atomic.Load(&pp.runqtail)
6351 if detailed {
6352 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6353 if mp != nil {
6354 print(mp.id)
6355 } else {
6356 print("nil")
6357 }
6358 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6359 } else {
6360
6361
6362 print(" ")
6363 if i == 0 {
6364 print("[")
6365 }
6366 print(t - h)
6367 if i == len(allp)-1 {
6368 print("]\n")
6369 }
6370 }
6371 }
6372
6373 if !detailed {
6374 unlock(&sched.lock)
6375 return
6376 }
6377
6378 for mp := allm; mp != nil; mp = mp.alllink {
6379 pp := mp.p.ptr()
6380 print(" M", mp.id, ": p=")
6381 if pp != nil {
6382 print(pp.id)
6383 } else {
6384 print("nil")
6385 }
6386 print(" curg=")
6387 if mp.curg != nil {
6388 print(mp.curg.goid)
6389 } else {
6390 print("nil")
6391 }
6392 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6393 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6394 print(lockedg.goid)
6395 } else {
6396 print("nil")
6397 }
6398 print("\n")
6399 }
6400
6401 forEachG(func(gp *g) {
6402 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6403 if gp.m != nil {
6404 print(gp.m.id)
6405 } else {
6406 print("nil")
6407 }
6408 print(" lockedm=")
6409 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6410 print(lockedm.id)
6411 } else {
6412 print("nil")
6413 }
6414 print("\n")
6415 })
6416 unlock(&sched.lock)
6417 }
6418
6419
6420
6421
6422
6423
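// schedEnableUser enables or disables the scheduling of user
// goroutines.
//
// This does not stop already running user goroutines, so the caller
// should first stop the world when disabling user goroutines.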
6424 func schedEnableUser(enable bool) {
6425 lock(&sched.lock)
6426 if sched.disable.user == !enable {
6427 unlock(&sched.lock)
6428 return
6429 }
6430 sched.disable.user = !enable
6431 if enable {
6432 n := sched.disable.n
6433 sched.disable.n = 0
6434 globrunqputbatch(&sched.disable.runnable, n)
6435 unlock(&sched.lock)
6436 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6437 startm(nil, false, false)
6438 }
6439 } else {
6440 unlock(&sched.lock)
6441 }
6442 }
6443
6444
6445
6446
6447
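// schedEnabled reports whether gp should be scheduled. It returns
// false if scheduling of gp is disabled.
//
// sched.lock must be held.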
6448 func schedEnabled(gp *g) bool {
6449 assertLockHeld(&sched.lock)
6450
6451 if sched.disable.user {
6452 return isSystemGoroutine(gp, true)
6453 }
6454 return true
6455 }
6456
6457
6458
6459
6460
6461
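// Put mp on midle list.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec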
6462 func mput(mp *m) {
6463 assertLockHeld(&sched.lock)
6464
6465 mp.schedlink = sched.midle
6466 sched.midle.set(mp)
6467 sched.nmidle++
6468 checkdead()
6469 }
6470
6471
6472
6473
6474
6475
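// Try to get an m from midle list.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec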
6476 func mget() *m {
6477 assertLockHeld(&sched.lock)
6478
6479 mp := sched.midle.ptr()
6480 if mp != nil {
6481 sched.midle = mp.schedlink
6482 sched.nmidle--
6483 }
6484 return mp
6485 }
6486
6487
6488
6489
6490
6491
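// Put gp on the global runnable queue.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec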
6492 func globrunqput(gp *g) {
6493 assertLockHeld(&sched.lock)
6494
6495 sched.runq.pushBack(gp)
6496 sched.runqsize++
6497 }
6498
6499
6500
6501
6502
6503
6504 func globrunqputhead(gp *g) {
6505 assertLockHeld(&sched.lock)
6506
6507 sched.runq.push(gp)
6508 sched.runqsize++
6509 }
6510
6511
6512
6513
6514
6515
6516
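// Put a batch of runnable goroutines on the global runnable queue.
// This clears *batch.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec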
6517 func globrunqputbatch(batch *gQueue, n int32) {
6518 assertLockHeld(&sched.lock)
6519
6520 sched.runq.pushBackAll(*batch)
6521 sched.runqsize += n
6522 *batch = gQueue{}
6523 }
6524
6525
6526
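// Try get a batch of G's from the global runnable queue.
// sched.lock must be held.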
6527 func globrunqget(pp *p, max int32) *g {
6528 assertLockHeld(&sched.lock)
6529
6530 if sched.runqsize == 0 {
6531 return nil
6532 }
6533
6534 n := sched.runqsize/gomaxprocs + 1
6535 if n > sched.runqsize {
6536 n = sched.runqsize
6537 }
6538 if max > 0 && n > max {
6539 n = max
6540 }
6541 if n > int32(len(pp.runq))/2 {
6542 n = int32(len(pp.runq)) / 2
6543 }
6544
6545 sched.runqsize -= n
6546
6547 gp := sched.runq.pop()
6548 n--
6549 for ; n > 0; n-- {
6550 gp1 := sched.runq.pop()
6551 runqput(pp, gp1, false)
6552 }
6553 return gp
6554 }
6555
6556
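// pMask is an atomic bitmask of Ps.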
6557 type pMask []uint32
6558
6559
6560 func (p pMask) read(id uint32) bool {
6561 word := id / 32
6562 mask := uint32(1) << (id % 32)
6563 return (atomic.Load(&p[word]) & mask) != 0
6564 }
6565
6566
6567 func (p pMask) set(id int32) {
6568 word := id / 32
6569 mask := uint32(1) << (id % 32)
6570 atomic.Or(&p[word], mask)
6571 }
6572
6573
6574 func (p pMask) clear(id int32) {
6575 word := id / 32
6576 mask := uint32(1) << (id % 32)
6577 atomic.And(&p[word], ^mask)
6578 }
6579
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
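// pidleput puts p on the _Pidle list. now must be a relatively recent call
// to nanotime() or zero. Returns now or the current time if now was zero.
//
// This releases ownership of p. Once sched.lock is released it is no longer
// safe to use p.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec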
6591 func pidleput(pp *p, now int64) int64 {
6592 assertLockHeld(&sched.lock)
6593
6594 if !runqempty(pp) {
6595 throw("pidleput: P has non-empty run queue")
6596 }
6597 if now == 0 {
6598 now = nanotime()
6599 }
6600 if pp.timers.len.Load() == 0 {
6601 timerpMask.clear(pp.id)
6602 }
6603 idlepMask.set(pp.id)
6604 pp.link = sched.pidle
6605 sched.pidle.set(pp)
6606 sched.npidle.Add(1)
6607 if !pp.limiterEvent.start(limiterEventIdle, now) {
6608 throw("must be able to track idle limiter event")
6609 }
6610 return now
6611 }
6612
6613
6614
6615
6616
6617
6618
6619
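// pidleget tries to get a p from the _Pidle list, acquiring ownership.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec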
6620 func pidleget(now int64) (*p, int64) {
6621 assertLockHeld(&sched.lock)
6622
6623 pp := sched.pidle.ptr()
6624 if pp != nil {
6625
6626 if now == 0 {
6627 now = nanotime()
6628 }
6629 timerpMask.set(pp.id)
6630 idlepMask.clear(pp.id)
6631 sched.pidle = pp.link
6632 sched.npidle.Add(-1)
6633 pp.limiterEvent.stop(limiterEventIdle, now)
6634 }
6635 return pp, now
6636 }
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647
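// pidlegetSpinning tries to get a p from the _Pidle list, acquiring
// ownership. This is called by spinning Ms (or callers that need a
// spinning M) that have found work. If no P is available, this must
// synchronize with non-spinning Ms that may be preparing to drop
// their P.
//
// sched.lock must be held.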
6648 func pidlegetSpinning(now int64) (*p, int64) {
6649 assertLockHeld(&sched.lock)
6650
6651 pp, now := pidleget(now)
6652 if pp == nil {
6653
6654
6655
6656 sched.needspinning.Store(1)
6657 return nil, now
6658 }
6659
6660 return pp, now
6661 }
6662
6663
6664
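// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.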
6665 func runqempty(pp *p) bool {
6666
6667
6668
6669
6670 for {
6671 head := atomic.Load(&pp.runqhead)
6672 tail := atomic.Load(&pp.runqtail)
6673 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6674 if tail == atomic.Load(&pp.runqtail) {
6675 return head == tail && runnext == 0
6676 }
6677 }
6678 }
6679
6680
6681
6682
6683
6684
6685
6686
6687
6688
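// randomizeScheduler perturbs run queue order when the race detector is
// enabled, to help flush out code that depends on scheduling order.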
6689 const randomizeScheduler = raceenabled
6690
6691
6692
6693
6694
6695
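// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the pp.runnext slot.
// If the run queue is full, runnext puts g on the global queue.
// Executed only by the owner P.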
6696 func runqput(pp *p, gp *g, next bool) {
6697 if !haveSysmon && next {
6698
6699
6700
6701
6702
6703
6704
6705
6706 next = false
6707 }
6708 if randomizeScheduler && next && randn(2) == 0 {
6709 next = false
6710 }
6711
6712 if next {
6713 retryNext:
6714 oldnext := pp.runnext
6715 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6716 goto retryNext
6717 }
6718 if oldnext == 0 {
6719 return
6720 }
6721
6722 gp = oldnext.ptr()
6723 }
6724
6725 retry:
6726 h := atomic.LoadAcq(&pp.runqhead)
6727 t := pp.runqtail
6728 if t-h < uint32(len(pp.runq)) {
6729 pp.runq[t%uint32(len(pp.runq))].set(gp)
6730 atomic.StoreRel(&pp.runqtail, t+1)
6731 return
6732 }
6733 if runqputslow(pp, gp, h, t) {
6734 return
6735 }
6736
6737 goto retry
6738 }
6739
6740
6741
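// Put g and a batch of work from local runnable queue on global queue.
// Executed only by the owner P.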
6742 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6743 var batch [len(pp.runq)/2 + 1]*g
6744
6745
6746 n := t - h
6747 n = n / 2
6748 if n != uint32(len(pp.runq)/2) {
6749 throw("runqputslow: queue is not full")
6750 }
6751 for i := uint32(0); i < n; i++ {
6752 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6753 }
6754 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6755 return false
6756 }
6757 batch[n] = gp
6758
6759 if randomizeScheduler {
6760 for i := uint32(1); i <= n; i++ {
6761 j := cheaprandn(i + 1)
6762 batch[i], batch[j] = batch[j], batch[i]
6763 }
6764 }
6765
6766
6767 for i := uint32(0); i < n; i++ {
6768 batch[i].schedlink.set(batch[i+1])
6769 }
6770 var q gQueue
6771 q.head.set(batch[0])
6772 q.tail.set(batch[n])
6773
6774
6775 lock(&sched.lock)
6776 globrunqputbatch(&q, int32(n+1))
6777 unlock(&sched.lock)
6778 return true
6779 }
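
// For illustration only: the randomizeScheduler block above is the classic
// Fisher-Yates shuffle over batch[0..n]. The same idiom with math/rand on a
// plain int slice:
//
//	import "math/rand"
//
//	func shuffle(batch []int) {
//		for i := 1; i < len(batch); i++ {
//			j := rand.Intn(i + 1) // uniform in [0, i]
//			batch[i], batch[j] = batch[j], batch[i]
//		}
//	}
//
// Randomizing the batch order under -race helps flush out tests that depend
// on a deterministic scheduling order.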

// runqputbatch tries to put all the Gs on q on the local runnable queue.
// If the local queue is full, the remainder is put on the global queue; in
// that case this will temporarily acquire the scheduler lock.
// Executed only by the owner P.
func runqputbatch(pp *p, q *gQueue, qsize int) {
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	n := uint32(0)
	for !q.empty() && t-h < uint32(len(pp.runq)) {
		gp := q.pop()
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		t++
		n++
	}
	qsize -= int(n)

	if randomizeScheduler {
		off := func(o uint32) uint32 {
			return (pp.runqtail + o) % uint32(len(pp.runq))
		}
		for i := uint32(1); i < n; i++ {
			j := cheaprandn(i + 1)
			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
		}
	}

	atomic.StoreRel(&pp.runqtail, t)
	if !q.empty() {
		lock(&sched.lock)
		globrunqputbatch(q, int32(qsize))
		unlock(&sched.lock)
	}
}

// Get g from the local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice. Otherwise, it should start a new time slice.
// Executed only by the owner P.
func runqget(pp *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	next := pp.runnext
	// If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
	// because other Ps can replace only their own runnext slot.
	// Hence, there's no need to retry this CAS if it fails.
	if next != 0 && pp.runnext.cas(next, 0) {
		return next.ptr(), true
	}

	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := pp.runqtail
		if t == h {
			return nil, false
		}
		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
			return gp, false
		}
	}
}
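
// For illustration only: runqput and runqget implement a fixed-size
// single-producer multi-consumer ring. Only the owner P advances the tail,
// while any P may consume by CASing the head; a value read racily from a
// slot is simply discarded when the CAS fails, which is why the pattern is
// safe. A standalone sketch with hypothetical names and int payloads
// instead of *g:
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	type ring struct {
//		head atomic.Uint32 // advanced by any consumer via CAS
//		tail atomic.Uint32 // advanced only by the owning producer
//		buf  [256]int
//	}
//
//	// put is called only by the owner; it reports false when full.
//	func (r *ring) put(v int) bool {
//		h := r.head.Load()
//		t := r.tail.Load()
//		if t-h >= uint32(len(r.buf)) {
//			return false
//		}
//		r.buf[t%uint32(len(r.buf))] = v
//		r.tail.Store(t + 1) // publishes buf[t] to consumers
//		return true
//	}
//
//	// get may be called from any goroutine.
//	func (r *ring) get() (int, bool) {
//		for {
//			h := r.head.Load()
//			t := r.tail.Load()
//			if t == h {
//				return 0, false
//			}
//			v := r.buf[h%uint32(len(r.buf))]
//			if r.head.CompareAndSwap(h, h+1) { // commit the consume
//				return v, true
//			}
//		}
//	}
//
//	func main() {
//		var r ring
//		r.put(1)
//		r.put(2)
//		fmt.Println(r.get()) // 1 true
//	}
//
// Note that head and tail are free-running uint32 counters: t-h is the queue
// length even across wraparound, and indexes are reduced mod len(buf).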

// runqdrain drains the local runnable queue of pp and returns all goroutines in it.
// Executed only by the owner P.
func runqdrain(pp *p) (drainQ gQueue, n uint32) {
	oldNext := pp.runnext
	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
		drainQ.pushBack(oldNext.ptr())
		n++
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
	t := pp.runqtail
	qn := t - h
	if qn == 0 {
		return
	}
	if qn > uint32(len(pp.runq)) { // read inconsistent h and t
		goto retry
	}

	if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
		goto retry
	}

	// We advance the head pointer before draining the Gs into the gQueue
	// (rather than after) because runqsteal may be running in parallel.
	// Only once the CAS above has taken full ownership of the Gs is it safe
	// to rewrite their gp.schedlink fields; before that, another P could
	// still observe and steal Gs from this queue.
	for i := uint32(0); i < qn; i++ {
		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
		drainQ.pushBack(gp)
		n++
	}
	return
}

// Grabs a batch of goroutines from pp's runnable queue into batch.
// Batch is a ring buffer starting at batchHead.
// Returns number of grabbed goroutines.
// Can be executed by any P.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
		n := t - h
		n = n - n/2 // steal the larger half, ceil(n/2)
		if n == 0 {
			if stealRunNextG {
				// Try to steal from pp.runnext.
				if next := pp.runnext; next != 0 {
					if pp.status == _Prunning {
						// Sleep to ensure that pp isn't about to run the g
						// we are about to steal.
						// The important use case here is when the g running
						// on pp ready()s another g and then almost
						// immediately blocks. Instead of stealing runnext
						// in this window, back off to give pp a chance to
						// schedule runnext. This will avoid thrashing gs
						// between different Ps.
						// A sync chan send/recv takes ~50ns as of time of
						// writing, so 3us gives ~50x overshoot.
						if !osHasLowResTimer {
							usleep(3)
						} else {
							// On some platforms system timer granularity is
							// 1-15ms, which is way too much for this
							// optimization. So just yield.
							osyield()
						}
					}
					if !pp.runnext.cas(next, 0) {
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
			continue
		}
		for i := uint32(0); i < n; i++ {
			g := pp.runq[(h+i)%uint32(len(pp.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
			return n
		}
	}
}

// Steal half of the elements from the local runnable queue of p2
// and put them onto the local runnable queue of pp.
// Returns one of the stolen elements (or nil if failed).
func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
	t := pp.runqtail
	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	n--
	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
	if n == 0 {
		return gp
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(pp.runq)) {
		throw("runqsteal: runq overflow")
	}
	atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the items available for consumption
	return gp
}
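
// For illustration only, the steal-half arithmetic used by runqgrab: with n
// items queued it takes n - n/2, the larger half (ceil(n/2)), so a victim
// with a single G still loses it to an otherwise idle thief:
//
//	package main
//
//	import "fmt"
//
//	func stealCount(n uint32) uint32 { return n - n/2 } // ceil(n/2)
//
//	func main() {
//		for _, n := range []uint32{1, 2, 5, 256} {
//			fmt.Printf("%d queued -> steal %d\n", n, stealCount(n))
//		}
//		// Output:
//		// 1 queued -> steal 1
//		// 2 queued -> steal 1
//		// 5 queued -> steal 3
//		// 256 queued -> steal 128
//	}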

// A gQueue is a deque of Gs linked through g.schedlink. A G can only
// be on one gQueue or gList at a time.
type gQueue struct {
	head guintptr
	tail guintptr
}

// empty reports whether q is empty.
func (q *gQueue) empty() bool {
	return q.head == 0
}

// push adds gp to the head of q.
func (q *gQueue) push(gp *g) {
	gp.schedlink = q.head
	q.head.set(gp)
	if q.tail == 0 {
		q.tail.set(gp)
	}
}

// pushBack adds gp to the tail of q.
func (q *gQueue) pushBack(gp *g) {
	gp.schedlink = 0
	if q.tail != 0 {
		q.tail.ptr().schedlink.set(gp)
	} else {
		q.head.set(gp)
	}
	q.tail.set(gp)
}

// pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
// not be used.
func (q *gQueue) pushBackAll(q2 gQueue) {
	if q2.tail == 0 {
		return
	}
	q2.tail.ptr().schedlink = 0
	if q.tail != 0 {
		q.tail.ptr().schedlink = q2.head
	} else {
		q.head = q2.head
	}
	q.tail = q2.tail
}

// pop removes and returns the head of queue q. It returns nil if
// q is empty.
func (q *gQueue) pop() *g {
	gp := q.head.ptr()
	if gp != nil {
		q.head = gp.schedlink
		if q.head == 0 {
			q.tail = 0
		}
	}
	return gp
}

// popList takes all Gs in q and returns them as a gList.
func (q *gQueue) popList() gList {
	stack := gList{q.head}
	*q = gQueue{}
	return stack
}

// A gList is a list of Gs linked through g.schedlink. A G can only be
// on one gQueue or gList at a time.
type gList struct {
	head guintptr
}

// empty reports whether l is empty.
func (l *gList) empty() bool {
	return l.head == 0
}

// push adds gp to the head of l.
func (l *gList) push(gp *g) {
	gp.schedlink = l.head
	l.head.set(gp)
}

// pushAll prepends all Gs in q to l.
func (l *gList) pushAll(q gQueue) {
	if !q.empty() {
		q.tail.ptr().schedlink = l.head
		l.head = q.head
	}
}

// pop removes and returns the head of l. If l is empty, it returns nil.
func (l *gList) pop() *g {
	gp := l.head.ptr()
	if gp != nil {
		l.head = gp.schedlink
	}
	return gp
}
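
// For illustration only: gQueue is a FIFO (head and tail) and gList a LIFO
// (head only); both are intrusive, threading through g.schedlink, so Gs move
// between them without allocating nodes. A toy sketch of the same shapes:
//
//	package main
//
//	import "fmt"
//
//	type node struct {
//		val  int
//		next *node // plays the role of g.schedlink
//	}
//
//	type queue struct{ head, tail *node } // like gQueue
//	type list struct{ head *node }        // like gList
//
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//	}
//
//	// pushAll prepends a whole queue to the list, reusing the links.
//	func (l *list) pushAll(q queue) {
//		if q.head != nil {
//			q.tail.next = l.head
//			l.head = q.head
//		}
//	}
//
//	func main() {
//		var q queue
//		for i := 1; i <= 3; i++ {
//			q.pushBack(&node{val: i})
//		}
//		var l list
//		l.pushAll(q)
//		for n := l.head; n != nil; n = n.next {
//			fmt.Print(n.val, " ") // 1 2 3
//		}
//		fmt.Println()
//	}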
7059
7060
7061 func setMaxThreads(in int) (out int) {
7062 lock(&sched.lock)
7063 out = int(sched.maxmcount)
7064 if in > 0x7fffffff {
7065 sched.maxmcount = 0x7fffffff
7066 } else {
7067 sched.maxmcount = int32(in)
7068 }
7069 checkmcount()
7070 unlock(&sched.lock)
7071 return
7072 }
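
// setMaxThreads is the runtime side of runtime/debug.SetMaxThreads, which is
// the public way to reach it. A usage sketch; 20000 is an arbitrary example
// value (the default limit is 10000):
//
//	package main
//
//	import "runtime/debug"
//
//	func main() {
//		prev := debug.SetMaxThreads(20000) // raise the OS thread limit
//		defer debug.SetMaxThreads(prev)    // restore the old limit
//		// ... code that may block many OS threads in syscalls or cgo ...
//	}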

// procPin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/choleraehyq/pid
//   - github.com/songzhibin97/gkit
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname procPin
//go:nosplit
func procPin() int {
	gp := getg()
	mp := gp.m

	mp.locks++
	return int(mp.p.ptr().id)
}

// procUnpin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/choleraehyq/pid
//   - github.com/songzhibin97/gkit
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname procUnpin
//go:nosplit
func procUnpin() {
	gp := getg()
	gp.m.locks--
}

//go:linkname sync_runtime_procPin sync.runtime_procPin
//go:nosplit
func sync_runtime_procPin() int {
	return procPin()
}

//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
//go:nosplit
func sync_runtime_procUnpin() {
	procUnpin()
}

//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
//go:nosplit
func sync_atomic_runtime_procPin() int {
	return procPin()
}

//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
//go:nosplit
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}

// Active spinning for sync.Mutex.
//
// sync_runtime_canSpin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/livekit/protocol
//   - github.com/sagernet/gvisor
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname sync_runtime_canSpin sync.runtime_canSpin
//go:nosplit
func sync_runtime_canSpin(i int) bool {
	// sync.Mutex is cooperative, so we are conservative with spinning.
	// Spin only a few times and only if running on a multicore machine,
	// GOMAXPROCS>1, there is at least one other running P, and the local runq is empty.
	// As opposed to runtime mutex we don't do passive spinning here,
	// because there can be work on the global runq or on other Ps.
	if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		return false
	}
	return true
}

// sync_runtime_doSpin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/livekit/protocol
//   - github.com/sagernet/gvisor
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname sync_runtime_doSpin sync.runtime_doSpin
//go:nosplit
func sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}

var stealOrder randomOrder

// randomOrder/randomEnum are helper types for randomized work stealing.
// They allow enumerating all Ps in different pseudo-random orders without repetitions.
// The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
// are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
type randomOrder struct {
	count    uint32
	coprimes []uint32
}

type randomEnum struct {
	i     uint32
	count uint32
	pos   uint32
	inc   uint32
}

func (ord *randomOrder) reset(count uint32) {
	ord.count = count
	ord.coprimes = ord.coprimes[:0]
	for i := uint32(1); i <= count; i++ {
		if gcd(i, count) == 1 {
			ord.coprimes = append(ord.coprimes, i)
		}
	}
}

func (ord *randomOrder) start(i uint32) randomEnum {
	return randomEnum{
		count: ord.count,
		pos:   i % ord.count,
		inc:   ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
	}
}

func (enum *randomEnum) done() bool {
	return enum.i == enum.count
}

func (enum *randomEnum) next() {
	enum.i++
	enum.pos = (enum.pos + enum.inc) % enum.count
}

func (enum *randomEnum) position() uint32 {
	return enum.pos
}

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}
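
// For illustration only, a worked run of the coprime-stride enumeration:
// with count = 4 the coprimes are {1, 3}. Starting at pos = 2 with inc = 3
// visits 2, 1, 0, 3, touching each index exactly once because
// gcd(inc, count) == 1:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		const count = 4
//		pos, inc := uint32(2), uint32(3) // gcd(3, 4) == 1
//		for i := 0; i < count; i++ {
//			fmt.Print(pos, " ") // 2 1 0 3
//			pos = (pos + inc) % count
//		}
//		fmt.Println()
//	}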

// An initTask represents the set of initializations that need to be done for a package.
// Keep in sync with ../../test/noinit.go:initTask
type initTask struct {
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
	nfns  uint32
	// followed by nfns pcs, uintptr sized, one per init function to run
}

// inittrace stores statistics for init functions which are
// updated by malloc and newproc when active is true.
var inittrace tracestat

type tracestat struct {
	active bool   // init tracing activation status
	id     uint64 // init goroutine id
	allocs uint64 // heap allocations
	bytes  uint64 // heap allocated bytes
}

func doInit(ts []*initTask) {
	for _, t := range ts {
		doInit1(t)
	}
}

func doInit1(t *initTask) {
	switch t.state {
	case 2: // fully initialized
		return
	case 1: // initialization in progress
		throw("recursive call during initialization - linker skew")
	default: // not initialized yet
		t.state = 1 // initialization in progress

		var (
			start  int64
			before tracestat
		)

		if inittrace.active {
			start = nanotime()
			// Load stats non-atomically since inittrace is updated only by this init goroutine.
			before = inittrace
		}

		if t.nfns == 0 {
			// We should have pruned all of these in the linker.
			throw("inittask with no functions")
		}

		firstFunc := add(unsafe.Pointer(t), 8)
		for i := uint32(0); i < t.nfns; i++ {
			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
			f := *(*func())(unsafe.Pointer(&p))
			f()
		}

		if inittrace.active {
			end := nanotime()
			// Load stats non-atomically since inittrace is updated only by this init goroutine.
			after := inittrace

			f := *(*func())(unsafe.Pointer(&firstFunc))
			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))

			var sbuf [24]byte
			print("init ", pkg, " @")
			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
			print("\n")
		}

		t.state = 2 // initialization done
	}
}
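
// The print above is what GODEBUG=inittrace=1 emits: one line per package
// init. For illustration, a run might look like the following; the numbers
// are made up but follow the format built here:
//
//	$ GODEBUG=inittrace=1 ./myprog
//	init internal/godebug @0.008 ms, 0.001 ms clock, 352 bytes, 6 allocs
//	init runtime @0.059 ms, 0.026 ms clock, 0 bytes, 0 allocs
//	init main @0.31 ms, 0.11 ms clock, 1024 bytes, 12 allocs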