src/runtime/proc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/exithook"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

// set using cmd/go/internal/modload.ModInfoProg
var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.

var (
	m0           m
	g0           g
	mcache0      *mcache
	raceprocctx0 uintptr
	raceFiniLock mutex
)

// This slice records the initializing tasks that need to be
// done to start up the runtime. It is built by the linker.
var runtime_inittasks []*initTask

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	mp := getg().m

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must never be used for anything else.
	mp.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// An upper limit for max stack size. Used to avoid random crashes
	// after calling SetMaxStack and trying to allocate a stack that is too big,
	// since stackalloc works with 32-bit sizes.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the world started.
	// Must be before doInit for tracing init.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks) // Must be before defer.

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		// Set the x_crosscall2_ptr C function pointer variable to point to crosscall2.
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the initializing tasks for all loaded modules.
	for m := &firstmoduledata; m != nil; m = m.next {
		doInit(m.inittasks)
	}

	// Disable init tracing after main init done to avoid overhead
	// of collecting statistics in malloc and newproc.
	inittrace.active = false

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		if GOARCH == "wasm" {
			// On Wasm, instead of returning, park this goroutine so the
			// host can keep calling exported functions and event handlers.
			pause(sys.GetCallerSP() - 16) // should not return
			panic("unreachable")
		}
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		runExitHooks(0) // run hooks now, since racefini does not return
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	runExitHooks(0)

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}
}

func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}

func runExitHooks(code int) {
	exithook.Run(code)
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}

// goschedguarded yields the processor like Gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
func goschedguarded() {
	mcall(goschedguarded_m)
}

// goschedIfBusy yields the processor like Gosched, but only does so if
// there are no idle Ps or if we're on the only P and there's nothing in
// the run queue. In both cases there is freely available idle time.
func goschedIfBusy() {
	gp := getg()
	// Call gosched if gp.preempt is set; we may be in a tight loop that
	// doesn't otherwise yield.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}

// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called,
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}

// goready readies gp to run, switching to the system stack to do so.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
459
460
461 func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
470 mp := acquirem()
471 pp := mp.p.ptr()
472 if len(pp.sudogcache) == 0 {
473 lock(&sched.sudoglock)
474
475 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
476 s := sched.sudogcache
477 sched.sudogcache = s.next
478 s.next = nil
479 pp.sudogcache = append(pp.sudogcache, s)
480 }
481 unlock(&sched.sudoglock)
482
483 if len(pp.sudogcache) == 0 {
484 pp.sudogcache = append(pp.sudogcache, new(sudog))
485 }
486 }
487 n := len(pp.sudogcache)
488 s := pp.sudogcache[n-1]
489 pp.sudogcache[n-1] = nil
490 pp.sudogcache = pp.sudogcache[:n-1]
491 if s.elem != nil {
492 throw("acquireSudog: found s.elem != nil in cache")
493 }
494 releasem(mp)
495 return s
496 }
497
498
499 func releaseSudog(s *sudog) {
500 if s.elem != nil {
501 throw("runtime: sudog with non-nil elem")
502 }
503 if s.isSelect {
504 throw("runtime: sudog with non-false isSelect")
505 }
506 if s.next != nil {
507 throw("runtime: sudog with non-nil next")
508 }
509 if s.prev != nil {
510 throw("runtime: sudog with non-nil prev")
511 }
512 if s.waitlink != nil {
513 throw("runtime: sudog with non-nil waitlink")
514 }
515 if s.c != nil {
516 throw("runtime: sudog with non-nil c")
517 }
518 gp := getg()
519 if gp.param != nil {
520 throw("runtime: releaseSudog with non-nil gp.param")
521 }
522 mp := acquirem()
523 pp := mp.p.ptr()
524 if len(pp.sudogcache) == cap(pp.sudogcache) {
525
526 var first, last *sudog
527 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
528 n := len(pp.sudogcache)
529 p := pp.sudogcache[n-1]
530 pp.sudogcache[n-1] = nil
531 pp.sudogcache = pp.sudogcache[:n-1]
532 if first == nil {
533 first = p
534 } else {
535 last.next = p
536 }
537 last = p
538 }
539 lock(&sched.sudoglock)
540 last.next = sched.sudogcache
541 sched.sudogcache = first
542 unlock(&sched.sudoglock)
543 }
544 pp.sudogcache = append(pp.sudogcache, s)
545 releasem(mp)
546 }
547
548
549 func badmcall(fn func(*g)) {
550 throw("runtime: mcall called on m->g0 stack")
551 }
552
553 func badmcall2(fn func(*g)) {
554 throw("runtime: mcall function returned")
555 }
556
557 func badreflectcall() {
558 panic(plainError("arg size to reflect.call more than 1GB"))
559 }
560
561
562
563 func badmorestackg0() {
564 if !crashStackImplemented {
565 writeErrStr("fatal: morestack on g0\n")
566 return
567 }
568
569 g := getg()
570 switchToCrashStack(func() {
571 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
572 g.m.traceback = 2
573 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
574 print("\n")
575
576 throw("morestack on g0")
577 })
578 }
579
580
581
582 func badmorestackgsignal() {
583 writeErrStr("fatal: morestack on gsignal\n")
584 }
585
586
587 func badctxt() {
588 throw("ctxt != 0")
589 }
590
591
592
593 var gcrash g
594
595 var crashingG atomic.Pointer[g]
596
597
598
599
600
601
602
603
604
605 func switchToCrashStack(fn func()) {
606 me := getg()
607 if crashingG.CompareAndSwapNoWB(nil, me) {
608 switchToCrashStack0(fn)
609 abort()
610 }
611 if crashingG.Load() == me {
612
613 writeErrStr("fatal: recursive switchToCrashStack\n")
614 abort()
615 }
616
617 usleep_no_g(100)
618 writeErrStr("fatal: concurrent switchToCrashStack\n")
619 abort()
620 }
621
622
623
624
625 const crashStackImplemented = GOOS != "windows"
626
627
628 func switchToCrashStack0(fn func())
629
630 func lockedOSThread() bool {
631 gp := getg()
632 return gp.lockedm != 0 && gp.m.lockedg != 0
633 }
634
635 var (
636
637
638
639
640
641
642 allglock mutex
643 allgs []*g
644
645
646
647
648
649
650
651
652
653
654
655
656
657 allglen uintptr
658 allgptr **g
659 )
660
661 func allgadd(gp *g) {
662 if readgstatus(gp) == _Gidle {
663 throw("allgadd: bad status Gidle")
664 }
665
666 lock(&allglock)
667 allgs = append(allgs, gp)
668 if &allgs[0] != allgptr {
669 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
670 }
671 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
672 unlock(&allglock)
673 }
674
675
676
677
678 func allGsSnapshot() []*g {
679 assertWorldStoppedOrLockHeld(&allglock)
680
681
682
683
684
685
686 return allgs[:len(allgs):len(allgs)]
687 }
688
689
690 func atomicAllG() (**g, uintptr) {
691 length := atomic.Loaduintptr(&allglen)
692 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
693 return ptr, length
694 }
695
696
697 func atomicAllGIndex(ptr **g, i uintptr) *g {
698 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
699 }
700
701
702
703
704 func forEachG(fn func(gp *g)) {
705 lock(&allglock)
706 for _, gp := range allgs {
707 fn(gp)
708 }
709 unlock(&allglock)
710 }
711
712
713
714
715
716 func forEachGRace(fn func(gp *g)) {
717 ptr, length := atomicAllG()
718 for i := uintptr(0); i < length; i++ {
719 gp := atomicAllGIndex(ptr, i)
720 fn(gp)
721 }
722 return
723 }
724
725 const (
726
727
728 _GoidCacheBatch = 16
729 )
730
731
732
733 func cpuinit(env string) {
734 switch GOOS {
735 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
736 cpu.DebugOptions = true
737 }
738 cpu.Initialize(env)
739
740
741
742 switch GOARCH {
743 case "386", "amd64":
744 x86HasPOPCNT = cpu.X86.HasPOPCNT
745 x86HasSSE41 = cpu.X86.HasSSE41
746 x86HasFMA = cpu.X86.HasFMA
747
748 case "arm":
749 armHasVFPv4 = cpu.ARM.HasVFPv4
750
751 case "arm64":
752 arm64HasATOMICS = cpu.ARM64.HasATOMICS
753
754 case "loong64":
755 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
756 loong64HasLSX = cpu.Loong64.HasLSX
757 }
758 }
759
760
761
762
763 func getGodebugEarly() string {
764 const prefix = "GODEBUG="
765 var env string
766 switch GOOS {
767 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
768
769
770
771 n := int32(0)
772 for argv_index(argv, argc+1+n) != nil {
773 n++
774 }
775
776 for i := int32(0); i < n; i++ {
777 p := argv_index(argv, argc+1+i)
778 s := unsafe.String(p, findnull(p))
779
780 if stringslite.HasPrefix(s, prefix) {
781 env = gostring(p)[len(prefix):]
782 break
783 }
784 }
785 }
786 return env
787 }

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
797 func schedinit() {
798 lockInit(&sched.lock, lockRankSched)
799 lockInit(&sched.sysmonlock, lockRankSysmon)
800 lockInit(&sched.deferlock, lockRankDefer)
801 lockInit(&sched.sudoglock, lockRankSudog)
802 lockInit(&deadlock, lockRankDeadlock)
803 lockInit(&paniclk, lockRankPanic)
804 lockInit(&allglock, lockRankAllg)
805 lockInit(&allpLock, lockRankAllp)
806 lockInit(&reflectOffs.lock, lockRankReflectOffs)
807 lockInit(&finlock, lockRankFin)
808 lockInit(&cpuprof.lock, lockRankCpuprof)
809 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
810 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
811 traceLockInit()
812
813
814
815 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
816
817
818
819 gp := getg()
820 if raceenabled {
821 gp.racectx, raceprocctx0 = raceinit()
822 }
823
824 sched.maxmcount = 10000
825 crashFD.Store(^uintptr(0))
826
827
828 worldStopped()
829
830 ticks.init()
831 moduledataverify()
832 stackinit()
833 mallocinit()
834 godebug := getGodebugEarly()
835 cpuinit(godebug)
836 randinit()
837 alginit()
838 mcommoninit(gp.m, -1)
839 modulesinit()
840 typelinksinit()
841 itabsinit()
842 stkobjinit()
843
844 sigsave(&gp.m.sigmask)
845 initSigmask = gp.m.sigmask
846
847 goargs()
848 goenvs()
849 secure()
850 checkfds()
851 parsedebugvars()
852 gcinit()
853
854
855
856 gcrash.stack = stackalloc(16384)
857 gcrash.stackguard0 = gcrash.stack.lo + 1000
858 gcrash.stackguard1 = gcrash.stack.lo + 1000
859
860
861
862
863
864 if disableMemoryProfiling {
865 MemProfileRate = 0
866 }
867
868
869 mProfStackInit(gp.m)
870
871 lock(&sched.lock)
872 sched.lastpoll.Store(nanotime())
873 procs := ncpu
874 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
875 procs = n
876 }
877 if procresize(procs) != nil {
878 throw("unknown runnable goroutine during bootstrap")
879 }
880 unlock(&sched.lock)
881
882
883 worldStarted()
884
885 if buildVersion == "" {
886
887
888 buildVersion = "unknown"
889 }
890 if len(modinfo) == 1 {
891
892
893 modinfo = ""
894 }
895 }
896
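// dumpgstatus prints the status of gp and of the current g, for debugging.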
897 func dumpgstatus(gp *g) {
898 thisg := getg()
899 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
900 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
901 }
902
903
904 func checkmcount() {
905 assertLockHeld(&sched.lock)
906
907
908
909
910
911
912
913
914
915 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
916 if count > sched.maxmcount {
917 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
918 throw("thread exhaustion")
919 }
920 }
921
922
923
924
925
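// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.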
926 func mReserveID() int64 {
927 assertLockHeld(&sched.lock)
928
929 if sched.mnext+1 < sched.mnext {
930 throw("runtime: thread ID overflow")
931 }
932 id := sched.mnext
933 sched.mnext++
934 checkmcount()
935 return id
936 }
937
938
939 func mcommoninit(mp *m, id int64) {
940 gp := getg()
941
942
943 if gp != gp.m.g0 {
944 callers(1, mp.createstack[:])
945 }
946
947 lock(&sched.lock)
948
949 if id >= 0 {
950 mp.id = id
951 } else {
952 mp.id = mReserveID()
953 }
954
955 mrandinit(mp)
956
957 mpreinit(mp)
958 if mp.gsignal != nil {
959 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
960 }
961
962
963
964 mp.alllink = allm
965
966
967
968 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
969 unlock(&sched.lock)
970
971
972 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
973 mp.cgoCallers = new(cgoCallers)
974 }
975 mProfStackInit(mp)
976 }
977
978
979
980
981
982 func mProfStackInit(mp *m) {
983 if debug.profstackdepth == 0 {
984
985
986 return
987 }
988 mp.profStack = makeProfStackFP()
989 mp.mLockProfile.stack = makeProfStackFP()
990 }
991
992
993
994
995 func makeProfStackFP() []uintptr {
996
997
998
999
1000
1001
1002 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1003 }
1004
1005
1006
1007 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1008
1009
1010 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1011
1012 func (mp *m) becomeSpinning() {
1013 mp.spinning = true
1014 sched.nmspinning.Add(1)
1015 sched.needspinning.Store(0)
1016 }
1017
1018 func (mp *m) hasCgoOnStack() bool {
1019 return mp.ncgo > 0 || mp.isextra
1020 }
1021
1022 const (
1023
1024
1025 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1026
1027
1028
1029 osHasLowResClockInt = goos.IsWindows
1030
1031
1032
1033 osHasLowResClock = osHasLowResClockInt > 0
1034 )
1035
1036
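// Mark gp ready to run.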
1037 func ready(gp *g, traceskip int, next bool) {
1038 status := readgstatus(gp)
1039
1040
1041 mp := acquirem()
1042 if status&^_Gscan != _Gwaiting {
1043 dumpgstatus(gp)
1044 throw("bad g->status in ready")
1045 }
1046
1047
1048 trace := traceAcquire()
1049 casgstatus(gp, _Gwaiting, _Grunnable)
1050 if trace.ok() {
1051 trace.GoUnpark(gp, traceskip)
1052 traceRelease(trace)
1053 }
1054 runqput(mp.p.ptr(), gp, next)
1055 wakep()
1056 releasem(mp)
1057 }
1058
// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
1061 const freezeStopWait = 0x7fffffff
1062
// freezing is set to non-zero if the runtime is trying to freeze the
// world.
1065 var freezing atomic.Bool
1066
1067
1068
1069
1070 func freezetheworld() {
1071 freezing.Store(true)
1072 if debug.dontfreezetheworld > 0 {
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097 usleep(1000)
1098 return
1099 }
1100
1101
1102
1103
1104 for i := 0; i < 5; i++ {
1105
1106 sched.stopwait = freezeStopWait
1107 sched.gcwaiting.Store(true)
1108
1109 if !preemptall() {
1110 break
1111 }
1112 usleep(1000)
1113 }
1114
1115 usleep(1000)
1116 preemptall()
1117 usleep(1000)
1118 }
1119
1120
1121
1122
1123
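// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus and casfrom_Gscanstatus.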
1124 func readgstatus(gp *g) uint32 {
1125 return gp.atomicstatus.Load()
1126 }
1127
1128
1129
1130
1131
1132 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1133 success := false
1134
1135
1136 switch oldval {
1137 default:
1138 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1139 dumpgstatus(gp)
1140 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1141 case _Gscanrunnable,
1142 _Gscanwaiting,
1143 _Gscanrunning,
1144 _Gscansyscall,
1145 _Gscanpreempted:
1146 if newval == oldval&^_Gscan {
1147 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1148 }
1149 }
1150 if !success {
1151 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1152 dumpgstatus(gp)
1153 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1154 }
1155 releaseLockRankAndM(lockRankGscan)
1156 }
1157
1158
1159
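// castogscanstatus transitions gp from oldval to the corresponding _Gscan
// variant of that status. It returns false (leaving the status unchanged)
// if the CAS fails.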
1160 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1161 switch oldval {
1162 case _Grunnable,
1163 _Grunning,
1164 _Gwaiting,
1165 _Gsyscall:
1166 if newval == oldval|_Gscan {
1167 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1168 if r {
1169 acquireLockRankAndM(lockRankGscan)
1170 }
1171 return r
1172
1173 }
1174 }
1175 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1176 throw("castogscanstatus")
1177 panic("not reached")
1178 }
1179
1180
1181
1182 var casgstatusAlwaysTrack = false
1183
1184
1185
1186
1187
1188
1189
1190 func casgstatus(gp *g, oldval, newval uint32) {
1191 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1192 systemstack(func() {
1193
1194
1195 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1196 throw("casgstatus: bad incoming values")
1197 })
1198 }
1199
1200 lockWithRankMayAcquire(nil, lockRankGscan)
1201
1202
1203 const yieldDelay = 5 * 1000
1204 var nextYield int64
1205
1206
1207
1208 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1209 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1210 systemstack(func() {
1211
1212
1213 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1214 })
1215 }
1216 if i == 0 {
1217 nextYield = nanotime() + yieldDelay
1218 }
1219 if nanotime() < nextYield {
1220 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1221 procyield(1)
1222 }
1223 } else {
1224 osyield()
1225 nextYield = nanotime() + yieldDelay/2
1226 }
1227 }
1228
1229 if oldval == _Grunning {
1230
1231 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1232 gp.tracking = true
1233 }
1234 gp.trackingSeq++
1235 }
1236 if !gp.tracking {
1237 return
1238 }
1239
1240
1241
1242
1243
1244
1245 switch oldval {
1246 case _Grunnable:
1247
1248
1249
1250 now := nanotime()
1251 gp.runnableTime += now - gp.trackingStamp
1252 gp.trackingStamp = 0
1253 case _Gwaiting:
1254 if !gp.waitreason.isMutexWait() {
1255
1256 break
1257 }
1258
1259
1260
1261
1262
1263 now := nanotime()
1264 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1265 gp.trackingStamp = 0
1266 }
1267 switch newval {
1268 case _Gwaiting:
1269 if !gp.waitreason.isMutexWait() {
1270
1271 break
1272 }
1273
1274 now := nanotime()
1275 gp.trackingStamp = now
1276 case _Grunnable:
1277
1278
1279 now := nanotime()
1280 gp.trackingStamp = now
1281 case _Grunning:
1282
1283
1284
1285 gp.tracking = false
1286 sched.timeToRun.record(gp.runnableTime)
1287 gp.runnableTime = 0
1288 }
1289 }
1290
1291
1292
1293
1294 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1295
1296 gp.waitreason = reason
1297 casgstatus(gp, old, _Gwaiting)
1298 }
1299
1300
1301
1302
1303
1304 func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
1305 if !reason.isWaitingForGC() {
1306 throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
1307 }
1308 casGToWaiting(gp, old, reason)
1309 }
1310
1311
1312
1313
1314
1315
1316
1317
1318 func casgcopystack(gp *g) uint32 {
1319 for {
1320 oldstatus := readgstatus(gp) &^ _Gscan
1321 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1322 throw("copystack: bad status, not Gwaiting or Grunnable")
1323 }
1324 if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
1325 return oldstatus
1326 }
1327 }
1328 }
1329
1330
1331
1332
1333
1334 func casGToPreemptScan(gp *g, old, new uint32) {
1335 if old != _Grunning || new != _Gscan|_Gpreempted {
1336 throw("bad g transition")
1337 }
1338 acquireLockRankAndM(lockRankGscan)
1339 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1340 }
1341 }
1342
1343
1344
1345
1346 func casGFromPreempted(gp *g, old, new uint32) bool {
1347 if old != _Gpreempted || new != _Gwaiting {
1348 throw("bad g transition")
1349 }
1350 gp.waitreason = waitReasonPreempted
1351 return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
1352 }
1353
1354
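// stwReason is an enumeration of reasons the world is stopping.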
1355 type stwReason uint8
1356
1357
1358
1359
1360 const (
1361 stwUnknown stwReason = iota
1362 stwGCMarkTerm
1363 stwGCSweepTerm
1364 stwWriteHeapDump
1365 stwGoroutineProfile
1366 stwGoroutineProfileCleanup
1367 stwAllGoroutinesStack
1368 stwReadMemStats
1369 stwAllThreadsSyscall
1370 stwGOMAXPROCS
1371 stwStartTrace
1372 stwStopTrace
1373 stwForTestCountPagesInUse
1374 stwForTestReadMetricsSlow
1375 stwForTestReadMemStatsSlow
1376 stwForTestPageCachePagesLeaked
1377 stwForTestResetDebugLog
1378 )
1379
1380 func (r stwReason) String() string {
1381 return stwReasonStrings[r]
1382 }
1383
1384 func (r stwReason) isGC() bool {
1385 return r == stwGCMarkTerm || r == stwGCSweepTerm
1386 }
1387
1388
1389
1390
1391 var stwReasonStrings = [...]string{
1392 stwUnknown: "unknown",
1393 stwGCMarkTerm: "GC mark termination",
1394 stwGCSweepTerm: "GC sweep termination",
1395 stwWriteHeapDump: "write heap dump",
1396 stwGoroutineProfile: "goroutine profile",
1397 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1398 stwAllGoroutinesStack: "all goroutines stack trace",
1399 stwReadMemStats: "read mem stats",
1400 stwAllThreadsSyscall: "AllThreadsSyscall",
1401 stwGOMAXPROCS: "GOMAXPROCS",
1402 stwStartTrace: "start trace",
1403 stwStopTrace: "stop trace",
1404 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1405 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1406 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1407 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1408 stwForTestResetDebugLog: "ResetDebugLog (test)",
1409 }
1410
1411
1412
1413 type worldStop struct {
1414 reason stwReason
1415 startedStopping int64
1416 finishedStopping int64
1417 stoppingCPUTime int64
1418 }
1419
1420
1421
1422
1423 var stopTheWorldContext worldStop
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
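// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points and recording reason as the reason for the
// stop. On return, only the current goroutine's P is running.
// The caller must eventually call startTheWorld to undo this.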
1442 func stopTheWorld(reason stwReason) worldStop {
1443 semacquire(&worldsema)
1444 gp := getg()
1445 gp.m.preemptoff = reason.String()
1446 systemstack(func() {
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461 casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
1462 stopTheWorldContext = stopTheWorldWithSema(reason)
1463 casgstatus(gp, _Gwaiting, _Grunning)
1464 })
1465 return stopTheWorldContext
1466 }
1467
1468
1469
1470
1471 func startTheWorld(w worldStop) {
1472 systemstack(func() { startTheWorldWithSema(0, w) })
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489 mp := acquirem()
1490 mp.preemptoff = ""
1491 semrelease1(&worldsema, true, 0)
1492 releasem(mp)
1493 }
1494
1495
1496
1497
1498 func stopTheWorldGC(reason stwReason) worldStop {
1499 semacquire(&gcsema)
1500 return stopTheWorld(reason)
1501 }
1502
1503
1504
1505
1506 func startTheWorldGC(w worldStop) {
1507 startTheWorld(w)
1508 semrelease(&gcsema)
1509 }
1510
1511
1512 var worldsema uint32 = 1
1513
1514
1515
1516
1517
1518
1519
1520 var gcsema uint32 = 1
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
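// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling preemption
// first, and then should call this on the system stack.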
1552 func stopTheWorldWithSema(reason stwReason) worldStop {
1553 trace := traceAcquire()
1554 if trace.ok() {
1555 trace.STWStart(reason)
1556 traceRelease(trace)
1557 }
1558 gp := getg()
1559
1560
1561
1562 if gp.m.locks > 0 {
1563 throw("stopTheWorld: holding locks")
1564 }
1565
1566 lock(&sched.lock)
1567 start := nanotime()
1568 sched.stopwait = gomaxprocs
1569 sched.gcwaiting.Store(true)
1570 preemptall()
1571
1572 gp.m.p.ptr().status = _Pgcstop
1573 gp.m.p.ptr().gcStopTime = start
1574 sched.stopwait--
1575
1576 trace = traceAcquire()
1577 for _, pp := range allp {
1578 s := pp.status
1579 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1580 if trace.ok() {
1581 trace.ProcSteal(pp, false)
1582 }
1583 pp.syscalltick++
1584 pp.gcStopTime = nanotime()
1585 sched.stopwait--
1586 }
1587 }
1588 if trace.ok() {
1589 traceRelease(trace)
1590 }
1591
1592
1593 now := nanotime()
1594 for {
1595 pp, _ := pidleget(now)
1596 if pp == nil {
1597 break
1598 }
1599 pp.status = _Pgcstop
1600 pp.gcStopTime = nanotime()
1601 sched.stopwait--
1602 }
1603 wait := sched.stopwait > 0
1604 unlock(&sched.lock)
1605
1606
1607 if wait {
1608 for {
1609
1610 if notetsleep(&sched.stopnote, 100*1000) {
1611 noteclear(&sched.stopnote)
1612 break
1613 }
1614 preemptall()
1615 }
1616 }
1617
1618 finish := nanotime()
1619 startTime := finish - start
1620 if reason.isGC() {
1621 sched.stwStoppingTimeGC.record(startTime)
1622 } else {
1623 sched.stwStoppingTimeOther.record(startTime)
1624 }
1625
1626
1627
1628
1629
1630 stoppingCPUTime := int64(0)
1631 bad := ""
1632 if sched.stopwait != 0 {
1633 bad = "stopTheWorld: not stopped (stopwait != 0)"
1634 } else {
1635 for _, pp := range allp {
1636 if pp.status != _Pgcstop {
1637 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1638 }
1639 if pp.gcStopTime == 0 && bad == "" {
1640 bad = "stopTheWorld: broken CPU time accounting"
1641 }
1642 stoppingCPUTime += finish - pp.gcStopTime
1643 pp.gcStopTime = 0
1644 }
1645 }
1646 if freezing.Load() {
1647
1648
1649
1650
1651 lock(&deadlock)
1652 lock(&deadlock)
1653 }
1654 if bad != "" {
1655 throw(bad)
1656 }
1657
1658 worldStopped()
1659
1660 return worldStop{
1661 reason: reason,
1662 startedStopping: start,
1663 finishedStopping: finish,
1664 stoppingCPUTime: stoppingCPUTime,
1665 }
1666 }
1667
1668
1669
1670
1671
1672
1673
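// startTheWorldWithSema undoes the effects of stopTheWorldWithSema. now is
// the current time; if 0, the current time is read, and that time is returned.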
1674 func startTheWorldWithSema(now int64, w worldStop) int64 {
1675 assertWorldStopped()
1676
1677 mp := acquirem()
1678 if netpollinited() {
1679 list, delta := netpoll(0)
1680 injectglist(&list)
1681 netpollAdjustWaiters(delta)
1682 }
1683 lock(&sched.lock)
1684
1685 procs := gomaxprocs
1686 if newprocs != 0 {
1687 procs = newprocs
1688 newprocs = 0
1689 }
1690 p1 := procresize(procs)
1691 sched.gcwaiting.Store(false)
1692 if sched.sysmonwait.Load() {
1693 sched.sysmonwait.Store(false)
1694 notewakeup(&sched.sysmonnote)
1695 }
1696 unlock(&sched.lock)
1697
1698 worldStarted()
1699
1700 for p1 != nil {
1701 p := p1
1702 p1 = p1.link.ptr()
1703 if p.m != 0 {
1704 mp := p.m.ptr()
1705 p.m = 0
1706 if mp.nextp != 0 {
1707 throw("startTheWorld: inconsistent mp->nextp")
1708 }
1709 mp.nextp.set(p)
1710 notewakeup(&mp.park)
1711 } else {
1712
1713 newm(nil, p, -1)
1714 }
1715 }
1716
1717
1718 if now == 0 {
1719 now = nanotime()
1720 }
1721 totalTime := now - w.startedStopping
1722 if w.reason.isGC() {
1723 sched.stwTotalTimeGC.record(totalTime)
1724 } else {
1725 sched.stwTotalTimeOther.record(totalTime)
1726 }
1727 trace := traceAcquire()
1728 if trace.ok() {
1729 trace.STWDone()
1730 traceRelease(trace)
1731 }
1732
1733
1734
1735
1736 wakep()
1737
1738 releasem(mp)
1739
1740 return now
1741 }
1742
1743
1744
1745 func usesLibcall() bool {
1746 switch GOOS {
1747 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1748 return true
1749 case "openbsd":
1750 return GOARCH != "mips64"
1751 }
1752 return false
1753 }
1754
1755
1756
1757 func mStackIsSystemAllocated() bool {
1758 switch GOOS {
1759 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1760 return true
1761 case "openbsd":
1762 return GOARCH != "mips64"
1763 }
1764 return false
1765 }
1766
1767
1768
1769 func mstart()
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780 func mstart0() {
1781 gp := getg()
1782
1783 osStack := gp.stack.lo == 0
1784 if osStack {
1785
1786
1787
1788
1789
1790
1791
1792
1793 size := gp.stack.hi
1794 if size == 0 {
1795 size = 16384 * sys.StackGuardMultiplier
1796 }
1797 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1798 gp.stack.lo = gp.stack.hi - size + 1024
1799 }
1800
1801
1802 gp.stackguard0 = gp.stack.lo + stackGuard
1803
1804
1805 gp.stackguard1 = gp.stackguard0
1806 mstart1()
1807
1808
1809 if mStackIsSystemAllocated() {
1810
1811
1812
1813 osStack = true
1814 }
1815 mexit(osStack)
1816 }
1817
1818
1819
1820
1821
1822 func mstart1() {
1823 gp := getg()
1824
1825 if gp != gp.m.g0 {
1826 throw("bad runtime·mstart")
1827 }
1828
1829
1830
1831
1832
1833
1834
1835 gp.sched.g = guintptr(unsafe.Pointer(gp))
1836 gp.sched.pc = sys.GetCallerPC()
1837 gp.sched.sp = sys.GetCallerSP()
1838
1839 asminit()
1840 minit()
1841
1842
1843
1844 if gp.m == &m0 {
1845 mstartm0()
1846 }
1847
1848 if fn := gp.m.mstartfn; fn != nil {
1849 fn()
1850 }
1851
1852 if gp.m != &m0 {
1853 acquirep(gp.m.nextp.ptr())
1854 gp.m.nextp = 0
1855 }
1856 schedule()
1857 }
1858
1859
1860
1861
1862
1863
1864
1865 func mstartm0() {
1866
1867
1868
1869 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1870 cgoHasExtraM = true
1871 newextram()
1872 }
1873 initsig(false)
1874 }
1875
1876
1877
1878
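// mPark causes a thread to park itself, returning once woken.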
1879 func mPark() {
1880 gp := getg()
1881 notesleep(&gp.m.park)
1882 noteclear(&gp.m.park)
1883 }
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895 func mexit(osStack bool) {
1896 mp := getg().m
1897
1898 if mp == &m0 {
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910 handoffp(releasep())
1911 lock(&sched.lock)
1912 sched.nmfreed++
1913 checkdead()
1914 unlock(&sched.lock)
1915 mPark()
1916 throw("locked m0 woke up")
1917 }
1918
1919 sigblock(true)
1920 unminit()
1921
1922
1923 if mp.gsignal != nil {
1924 stackfree(mp.gsignal.stack)
1925
1926
1927
1928
1929 mp.gsignal = nil
1930 }
1931
1932
1933 lock(&sched.lock)
1934 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1935 if *pprev == mp {
1936 *pprev = mp.alllink
1937 goto found
1938 }
1939 }
1940 throw("m not found in allm")
1941 found:
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956 mp.freeWait.Store(freeMWait)
1957 mp.freelink = sched.freem
1958 sched.freem = mp
1959 unlock(&sched.lock)
1960
1961 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1962 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1963
1964
1965 handoffp(releasep())
1966
1967
1968
1969
1970
1971 lock(&sched.lock)
1972 sched.nmfreed++
1973 checkdead()
1974 unlock(&sched.lock)
1975
1976 if GOOS == "darwin" || GOOS == "ios" {
1977
1978
1979 if mp.signalPending.Load() != 0 {
1980 pendingPreemptSignals.Add(-1)
1981 }
1982 }
1983
1984
1985
1986 mdestroy(mp)
1987
1988 if osStack {
1989
1990 mp.freeWait.Store(freeMRef)
1991
1992
1993
1994 return
1995 }
1996
1997
1998
1999
2000
2001 exitThread(&mp.freeWait)
2002 }
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
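// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P.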
2014 func forEachP(reason waitReason, fn func(*p)) {
2015 systemstack(func() {
2016 gp := getg().m.curg
2017
2018
2019
2020
2021
2022
2023
2024
2025 casGToWaitingForGC(gp, _Grunning, reason)
2026 forEachPInternal(fn)
2027 casgstatus(gp, _Gwaiting, _Grunning)
2028 })
2029 }
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040 func forEachPInternal(fn func(*p)) {
2041 mp := acquirem()
2042 pp := getg().m.p.ptr()
2043
2044 lock(&sched.lock)
2045 if sched.safePointWait != 0 {
2046 throw("forEachP: sched.safePointWait != 0")
2047 }
2048 sched.safePointWait = gomaxprocs - 1
2049 sched.safePointFn = fn
2050
2051
2052 for _, p2 := range allp {
2053 if p2 != pp {
2054 atomic.Store(&p2.runSafePointFn, 1)
2055 }
2056 }
2057 preemptall()
2058
2059
2060
2061
2062
2063
2064
2065 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2066 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2067 fn(p)
2068 sched.safePointWait--
2069 }
2070 }
2071
2072 wait := sched.safePointWait > 0
2073 unlock(&sched.lock)
2074
2075
2076 fn(pp)
2077
2078
2079
2080 for _, p2 := range allp {
2081 s := p2.status
2082
2083
2084
2085 trace := traceAcquire()
2086 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2087 if trace.ok() {
2088
2089 trace.ProcSteal(p2, false)
2090 traceRelease(trace)
2091 }
2092 p2.syscalltick++
2093 handoffp(p2)
2094 } else if trace.ok() {
2095 traceRelease(trace)
2096 }
2097 }
2098
2099
2100 if wait {
2101 for {
2102
2103
2104
2105
2106 if notetsleep(&sched.safePointNote, 100*1000) {
2107 noteclear(&sched.safePointNote)
2108 break
2109 }
2110 preemptall()
2111 }
2112 }
2113 if sched.safePointWait != 0 {
2114 throw("forEachP: not done")
2115 }
2116 for _, p2 := range allp {
2117 if p2.runSafePointFn != 0 {
2118 throw("forEachP: P did not run fn")
2119 }
2120 }
2121
2122 lock(&sched.lock)
2123 sched.safePointFn = nil
2124 unlock(&sched.lock)
2125 releasem(mp)
2126 }
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139 func runSafePointFn() {
2140 p := getg().m.p.ptr()
2141
2142
2143
2144 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2145 return
2146 }
2147 sched.safePointFn(p)
2148 lock(&sched.lock)
2149 sched.safePointWait--
2150 if sched.safePointWait == 0 {
2151 notewakeup(&sched.safePointNote)
2152 }
2153 unlock(&sched.lock)
2154 }
2155
2156
2157
2158
2159 var cgoThreadStart unsafe.Pointer
2160
2161 type cgothreadstart struct {
2162 g guintptr
2163 tls *uint64
2164 fn unsafe.Pointer
2165 }
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
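// allocm allocates a new m unassociated with any thread.
// It can use pp for allocation context if needed.
// fn is recorded in the new m as m.mstartfn.
// id is an optional pre-allocated m ID; omit it by passing -1.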
2176 func allocm(pp *p, fn func(), id int64) *m {
2177 allocmLock.rlock()
2178
2179
2180
2181
2182 acquirem()
2183
2184 gp := getg()
2185 if gp.m.p == 0 {
2186 acquirep(pp)
2187 }
2188
2189
2190
2191 if sched.freem != nil {
2192 lock(&sched.lock)
2193 var newList *m
2194 for freem := sched.freem; freem != nil; {
2195
2196 wait := freem.freeWait.Load()
2197 if wait == freeMWait {
2198 next := freem.freelink
2199 freem.freelink = newList
2200 newList = freem
2201 freem = next
2202 continue
2203 }
2204
2205
2206
2207 if traceEnabled() || traceShuttingDown() {
2208 traceThreadDestroy(freem)
2209 }
2210
2211
2212
2213 if wait == freeMStack {
2214
2215
2216
2217 systemstack(func() {
2218 stackfree(freem.g0.stack)
2219 })
2220 }
2221 freem = freem.freelink
2222 }
2223 sched.freem = newList
2224 unlock(&sched.lock)
2225 }
2226
2227 mp := new(m)
2228 mp.mstartfn = fn
2229 mcommoninit(mp, id)
2230
2231
2232
2233 if iscgo || mStackIsSystemAllocated() {
2234 mp.g0 = malg(-1)
2235 } else {
2236 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2237 }
2238 mp.g0.m = mp
2239
2240 if pp == gp.m.p.ptr() {
2241 releasep()
2242 }
2243
2244 releasem(gp.m)
2245 allocmLock.runlock()
2246 return mp
2247 }
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288 func needm(signal bool) {
2289 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2290
2291
2292
2293
2294
2295
2296 writeErrStr("fatal error: cgo callback before cgo call\n")
2297 exit(1)
2298 }
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308 var sigmask sigset
2309 sigsave(&sigmask)
2310 sigblock(false)
2311
2312
2313
2314
2315 mp, last := getExtraM()
2316
2317
2318
2319
2320
2321
2322
2323
2324 mp.needextram = last
2325
2326
2327 mp.sigmask = sigmask
2328
2329
2330
2331 osSetupTLS(mp)
2332
2333
2334
2335 setg(mp.g0)
2336 sp := sys.GetCallerSP()
2337 callbackUpdateSystemStack(mp, sp, signal)
2338
2339
2340
2341
2342 mp.isExtraInC = false
2343
2344
2345 asminit()
2346 minit()
2347
2348
2349
2350
2351
2352
2353 var trace traceLocker
2354 if !signal {
2355 trace = traceAcquire()
2356 }
2357
2358
2359 casgstatus(mp.curg, _Gdead, _Gsyscall)
2360 sched.ngsys.Add(-1)
2361
2362 if !signal {
2363 if trace.ok() {
2364 trace.GoCreateSyscall(mp.curg)
2365 traceRelease(trace)
2366 }
2367 }
2368 mp.isExtraInSig = signal
2369 }
2370
2371
2372
2373
2374 func needAndBindM() {
2375 needm(false)
2376
2377 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2378 cgoBindM()
2379 }
2380 }
2381
2382
2383
2384
2385 func newextram() {
2386 c := extraMWaiters.Swap(0)
2387 if c > 0 {
2388 for i := uint32(0); i < c; i++ {
2389 oneNewExtraM()
2390 }
2391 } else if extraMLength.Load() == 0 {
2392
2393 oneNewExtraM()
2394 }
2395 }
2396
2397
2398 func oneNewExtraM() {
2399
2400
2401
2402
2403
2404 mp := allocm(nil, nil, -1)
2405 gp := malg(4096)
2406 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2407 gp.sched.sp = gp.stack.hi
2408 gp.sched.sp -= 4 * goarch.PtrSize
2409 gp.sched.lr = 0
2410 gp.sched.g = guintptr(unsafe.Pointer(gp))
2411 gp.syscallpc = gp.sched.pc
2412 gp.syscallsp = gp.sched.sp
2413 gp.stktopsp = gp.sched.sp
2414
2415
2416
2417
2418 casgstatus(gp, _Gidle, _Gdead)
2419 gp.m = mp
2420 mp.curg = gp
2421 mp.isextra = true
2422
2423 mp.isExtraInC = true
2424 mp.lockedInt++
2425 mp.lockedg.set(gp)
2426 gp.lockedm.set(mp)
2427 gp.goid = sched.goidgen.Add(1)
2428 if raceenabled {
2429 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2430 }
2431
2432 allgadd(gp)
2433
2434
2435
2436
2437
2438 sched.ngsys.Add(1)
2439
2440
2441 addExtraM(mp)
2442 }
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477 func dropm() {
2478
2479
2480
2481 mp := getg().m
2482
2483
2484
2485
2486
2487 var trace traceLocker
2488 if !mp.isExtraInSig {
2489 trace = traceAcquire()
2490 }
2491
2492
2493 casgstatus(mp.curg, _Gsyscall, _Gdead)
2494 mp.curg.preemptStop = false
2495 sched.ngsys.Add(1)
2496
2497 if !mp.isExtraInSig {
2498 if trace.ok() {
2499 trace.GoDestroySyscall()
2500 traceRelease(trace)
2501 }
2502 }
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517 mp.syscalltick--
2518
2519
2520
2521 mp.curg.trace.reset()
2522
2523
2524
2525
2526 if traceEnabled() || traceShuttingDown() {
2527
2528
2529
2530
2531
2532
2533
2534 lock(&sched.lock)
2535 traceThreadDestroy(mp)
2536 unlock(&sched.lock)
2537 }
2538 mp.isExtraInSig = false
2539
2540
2541
2542
2543
2544 sigmask := mp.sigmask
2545 sigblock(false)
2546 unminit()
2547
2548 setg(nil)
2549
2550
2551
2552 g0 := mp.g0
2553 g0.stack.hi = 0
2554 g0.stack.lo = 0
2555 g0.stackguard0 = 0
2556 g0.stackguard1 = 0
2557 mp.g0StackAccurate = false
2558
2559 putExtraM(mp)
2560
2561 msigrestore(sigmask)
2562 }
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584 func cgoBindM() {
2585 if GOOS == "windows" || GOOS == "plan9" {
2586 fatal("bindm in unexpected GOOS")
2587 }
2588 g := getg()
2589 if g.m.g0 != g {
2590 fatal("the current g is not g0")
2591 }
2592 if _cgo_bindm != nil {
2593 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2594 }
2595 }
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608 func getm() uintptr {
2609 return uintptr(unsafe.Pointer(getg().m))
2610 }
2611
2612 var (
2613
2614
2615
2616
2617
2618
2619 extraM atomic.Uintptr
2620
2621 extraMLength atomic.Uint32
2622
2623 extraMWaiters atomic.Uint32
2624
2625
2626 extraMInUse atomic.Uint32
2627 )
2628
2629
2630
2631
2632
2633
2634
2635
2636 func lockextra(nilokay bool) *m {
2637 const locked = 1
2638
2639 incr := false
2640 for {
2641 old := extraM.Load()
2642 if old == locked {
2643 osyield_no_g()
2644 continue
2645 }
2646 if old == 0 && !nilokay {
2647 if !incr {
2648
2649
2650
2651 extraMWaiters.Add(1)
2652 incr = true
2653 }
2654 usleep_no_g(1)
2655 continue
2656 }
2657 if extraM.CompareAndSwap(old, locked) {
2658 return (*m)(unsafe.Pointer(old))
2659 }
2660 osyield_no_g()
2661 continue
2662 }
2663 }
2664
2665
2666 func unlockextra(mp *m, delta int32) {
2667 extraMLength.Add(delta)
2668 extraM.Store(uintptr(unsafe.Pointer(mp)))
2669 }
2670
2671
2672
2673
2674
2675
2676
2677
2678 func getExtraM() (mp *m, last bool) {
2679 mp = lockextra(false)
2680 extraMInUse.Add(1)
2681 unlockextra(mp.schedlink.ptr(), -1)
2682 return mp, mp.schedlink.ptr() == nil
2683 }
2684
2685
2686
2687
2688
2689 func putExtraM(mp *m) {
2690 extraMInUse.Add(-1)
2691 addExtraM(mp)
2692 }
2693
2694
2695
2696
2697 func addExtraM(mp *m) {
2698 mnext := lockextra(true)
2699 mp.schedlink.set(mnext)
2700 unlockextra(mp, 1)
2701 }
2702
2703 var (
2704
2705
2706
2707 allocmLock rwmutex
2708
2709
2710
2711
2712 execLock rwmutex
2713 )
2714
2715
2716
2717 const (
2718 failthreadcreate = "runtime: failed to create new OS thread\n"
2719 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2720 )
2721
2722
2723
2724
2725 var newmHandoff struct {
2726 lock mutex
2727
2728
2729
2730 newm muintptr
2731
2732
2733
2734 waiting bool
2735 wake note
2736
2737
2738
2739
2740 haveTemplateThread uint32
2741 }
2742
2743
2744
2745
2746
2747
2748
2749
2750 func newm(fn func(), pp *p, id int64) {
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761 acquirem()
2762
2763 mp := allocm(pp, fn, id)
2764 mp.nextp.set(pp)
2765 mp.sigmask = initSigmask
2766 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778 lock(&newmHandoff.lock)
2779 if newmHandoff.haveTemplateThread == 0 {
2780 throw("on a locked thread with no template thread")
2781 }
2782 mp.schedlink = newmHandoff.newm
2783 newmHandoff.newm.set(mp)
2784 if newmHandoff.waiting {
2785 newmHandoff.waiting = false
2786 notewakeup(&newmHandoff.wake)
2787 }
2788 unlock(&newmHandoff.lock)
2789
2790
2791
2792 releasem(getg().m)
2793 return
2794 }
2795 newm1(mp)
2796 releasem(getg().m)
2797 }
2798
2799 func newm1(mp *m) {
2800 if iscgo {
2801 var ts cgothreadstart
2802 if _cgo_thread_start == nil {
2803 throw("_cgo_thread_start missing")
2804 }
2805 ts.g.set(mp.g0)
2806 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2807 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2808 if msanenabled {
2809 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2810 }
2811 if asanenabled {
2812 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2813 }
2814 execLock.rlock()
2815 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2816 execLock.runlock()
2817 return
2818 }
2819 execLock.rlock()
2820 newosproc(mp)
2821 execLock.runlock()
2822 }
2823
2824
2825
2826
2827
2828 func startTemplateThread() {
2829 if GOARCH == "wasm" {
2830 return
2831 }
2832
2833
2834
2835 mp := acquirem()
2836 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2837 releasem(mp)
2838 return
2839 }
2840 newm(templateThread, nil, -1)
2841 releasem(mp)
2842 }
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856 func templateThread() {
2857 lock(&sched.lock)
2858 sched.nmsys++
2859 checkdead()
2860 unlock(&sched.lock)
2861
2862 for {
2863 lock(&newmHandoff.lock)
2864 for newmHandoff.newm != 0 {
2865 newm := newmHandoff.newm.ptr()
2866 newmHandoff.newm = 0
2867 unlock(&newmHandoff.lock)
2868 for newm != nil {
2869 next := newm.schedlink.ptr()
2870 newm.schedlink = 0
2871 newm1(newm)
2872 newm = next
2873 }
2874 lock(&newmHandoff.lock)
2875 }
2876 newmHandoff.waiting = true
2877 noteclear(&newmHandoff.wake)
2878 unlock(&newmHandoff.lock)
2879 notesleep(&newmHandoff.wake)
2880 }
2881 }
2882
2883
2884
2885 func stopm() {
2886 gp := getg()
2887
2888 if gp.m.locks != 0 {
2889 throw("stopm holding locks")
2890 }
2891 if gp.m.p != 0 {
2892 throw("stopm holding p")
2893 }
2894 if gp.m.spinning {
2895 throw("stopm spinning")
2896 }
2897
2898 lock(&sched.lock)
2899 mput(gp.m)
2900 unlock(&sched.lock)
2901 mPark()
2902 acquirep(gp.m.nextp.ptr())
2903 gp.m.nextp = 0
2904 }
2905
2906 func mspinning() {
2907
2908 getg().m.spinning = true
2909 }
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
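// startm schedules some M to run the p (creating an M if necessary).
// If pp == nil, it tries to get an idle P; if there are no idle P's it does nothing.
// May run with m.p == nil, so write barriers are not allowed.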
2928 func startm(pp *p, spinning, lockheld bool) {
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945 mp := acquirem()
2946 if !lockheld {
2947 lock(&sched.lock)
2948 }
2949 if pp == nil {
2950 if spinning {
2951
2952
2953
2954 throw("startm: P required for spinning=true")
2955 }
2956 pp, _ = pidleget(0)
2957 if pp == nil {
2958 if !lockheld {
2959 unlock(&sched.lock)
2960 }
2961 releasem(mp)
2962 return
2963 }
2964 }
2965 nmp := mget()
2966 if nmp == nil {
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981 id := mReserveID()
2982 unlock(&sched.lock)
2983
2984 var fn func()
2985 if spinning {
2986
2987 fn = mspinning
2988 }
2989 newm(fn, pp, id)
2990
2991 if lockheld {
2992 lock(&sched.lock)
2993 }
2994
2995
2996 releasem(mp)
2997 return
2998 }
2999 if !lockheld {
3000 unlock(&sched.lock)
3001 }
3002 if nmp.spinning {
3003 throw("startm: m is spinning")
3004 }
3005 if nmp.nextp != 0 {
3006 throw("startm: m has p")
3007 }
3008 if spinning && !runqempty(pp) {
3009 throw("startm: p has runnable gs")
3010 }
3011
3012 nmp.spinning = spinning
3013 nmp.nextp.set(pp)
3014 notewakeup(&nmp.park)
3015
3016
3017 releasem(mp)
3018 }
3019
3020
3021
3022
3023
3024 func handoffp(pp *p) {
3025
3026
3027
3028
3029 if !runqempty(pp) || sched.runqsize != 0 {
3030 startm(pp, false, false)
3031 return
3032 }
3033
3034 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3035 startm(pp, false, false)
3036 return
3037 }
3038
3039 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3040 startm(pp, false, false)
3041 return
3042 }
3043
3044
3045 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3046 sched.needspinning.Store(0)
3047 startm(pp, true, false)
3048 return
3049 }
3050 lock(&sched.lock)
3051 if sched.gcwaiting.Load() {
3052 pp.status = _Pgcstop
3053 pp.gcStopTime = nanotime()
3054 sched.stopwait--
3055 if sched.stopwait == 0 {
3056 notewakeup(&sched.stopnote)
3057 }
3058 unlock(&sched.lock)
3059 return
3060 }
3061 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3062 sched.safePointFn(pp)
3063 sched.safePointWait--
3064 if sched.safePointWait == 0 {
3065 notewakeup(&sched.safePointNote)
3066 }
3067 }
3068 if sched.runqsize != 0 {
3069 unlock(&sched.lock)
3070 startm(pp, false, false)
3071 return
3072 }
3073
3074
3075 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3076 unlock(&sched.lock)
3077 startm(pp, false, false)
3078 return
3079 }
3080
3081
3082
3083 when := pp.timers.wakeTime()
3084 pidleput(pp, 0)
3085 unlock(&sched.lock)
3086
3087 if when != 0 {
3088 wakeNetPoller(when)
3089 }
3090 }
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
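// wakep tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).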
3105 func wakep() {
3106
3107
3108 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3109 return
3110 }
3111
3112
3113
3114
3115
3116
3117 mp := acquirem()
3118
3119 var pp *p
3120 lock(&sched.lock)
3121 pp, _ = pidlegetSpinning(0)
3122 if pp == nil {
3123 if sched.nmspinning.Add(-1) < 0 {
3124 throw("wakep: negative nmspinning")
3125 }
3126 unlock(&sched.lock)
3127 releasem(mp)
3128 return
3129 }
3130
3131
3132
3133
3134 unlock(&sched.lock)
3135
3136 startm(pp, true, false)
3137
3138 releasem(mp)
3139 }
3140
3141
3142
3143 func stoplockedm() {
3144 gp := getg()
3145
3146 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3147 throw("stoplockedm: inconsistent locking")
3148 }
3149 if gp.m.p != 0 {
3150
3151 pp := releasep()
3152 handoffp(pp)
3153 }
3154 incidlelocked(1)
3155
3156 mPark()
3157 status := readgstatus(gp.m.lockedg.ptr())
3158 if status&^_Gscan != _Grunnable {
3159 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3160 dumpgstatus(gp.m.lockedg.ptr())
3161 throw("stoplockedm: not runnable")
3162 }
3163 acquirep(gp.m.nextp.ptr())
3164 gp.m.nextp = 0
3165 }
3166
3167
3168
3169
3170
3171 func startlockedm(gp *g) {
3172 mp := gp.lockedm.ptr()
3173 if mp == getg().m {
3174 throw("startlockedm: locked to me")
3175 }
3176 if mp.nextp != 0 {
3177 throw("startlockedm: m has p")
3178 }
3179
3180 incidlelocked(-1)
3181 pp := releasep()
3182 mp.nextp.set(pp)
3183 notewakeup(&mp.park)
3184 stopm()
3185 }
3186
3187
3188
3189 func gcstopm() {
3190 gp := getg()
3191
3192 if !sched.gcwaiting.Load() {
3193 throw("gcstopm: not waiting for gc")
3194 }
3195 if gp.m.spinning {
3196 gp.m.spinning = false
3197
3198
3199 if sched.nmspinning.Add(-1) < 0 {
3200 throw("gcstopm: negative nmspinning")
3201 }
3202 }
3203 pp := releasep()
3204 lock(&sched.lock)
3205 pp.status = _Pgcstop
3206 pp.gcStopTime = nanotime()
3207 sched.stopwait--
3208 if sched.stopwait == 0 {
3209 notewakeup(&sched.stopnote)
3210 }
3211 unlock(&sched.lock)
3212 stopm()
3213 }
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
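// execute schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the current time
// slice; otherwise, it starts a new time slice.
// Never returns.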
3224 func execute(gp *g, inheritTime bool) {
3225 mp := getg().m
3226
3227 if goroutineProfile.active {
3228
3229
3230
3231 tryRecordGoroutineProfile(gp, nil, osyield)
3232 }
3233
3234
3235
3236 mp.curg = gp
3237 gp.m = mp
3238 casgstatus(gp, _Grunnable, _Grunning)
3239 gp.waitsince = 0
3240 gp.preempt = false
3241 gp.stackguard0 = gp.stack.lo + stackGuard
3242 if !inheritTime {
3243 mp.p.ptr().schedtick++
3244 }
3245
3246
3247 hz := sched.profilehz
3248 if mp.profilehz != hz {
3249 setThreadCPUProfiler(hz)
3250 }
3251
3252 trace := traceAcquire()
3253 if trace.ok() {
3254 trace.GoStart()
3255 traceRelease(trace)
3256 }
3257
3258 gogo(&gp.sched)
3259 }
3260
3261
3262
3263
3264
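// findRunnable finds a runnable goroutine to execute, trying the local and
// global run queues, netpoll, and work stealing from other P's.
// tryWakeP indicates that the returned goroutine is not normal (a GC worker
// or trace reader), so the caller should try to wake a P.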
3265 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3266 mp := getg().m
3267
3268
3269
3270
3271
3272 top:
3273 pp := mp.p.ptr()
3274 if sched.gcwaiting.Load() {
3275 gcstopm()
3276 goto top
3277 }
3278 if pp.runSafePointFn != 0 {
3279 runSafePointFn()
3280 }
3281
3282
3283
3284
3285
3286 now, pollUntil, _ := pp.timers.check(0)
3287
3288
3289 if traceEnabled() || traceShuttingDown() {
3290 gp := traceReader()
3291 if gp != nil {
3292 trace := traceAcquire()
3293 casgstatus(gp, _Gwaiting, _Grunnable)
3294 if trace.ok() {
3295 trace.GoUnpark(gp, 0)
3296 traceRelease(trace)
3297 }
3298 return gp, false, true
3299 }
3300 }
3301
3302
3303 if gcBlackenEnabled != 0 {
3304 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3305 if gp != nil {
3306 return gp, false, true
3307 }
3308 now = tnow
3309 }
3310
3311
3312
3313
3314 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3315 lock(&sched.lock)
3316 gp := globrunqget(pp, 1)
3317 unlock(&sched.lock)
3318 if gp != nil {
3319 return gp, false, false
3320 }
3321 }
3322
3323
3324 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3325 if gp := wakefing(); gp != nil {
3326 ready(gp, 0, true)
3327 }
3328 }
3329 if *cgo_yield != nil {
3330 asmcgocall(*cgo_yield, nil)
3331 }
3332
3333
3334 if gp, inheritTime := runqget(pp); gp != nil {
3335 return gp, inheritTime, false
3336 }
3337
3338
3339 if sched.runqsize != 0 {
3340 lock(&sched.lock)
3341 gp := globrunqget(pp, 0)
3342 unlock(&sched.lock)
3343 if gp != nil {
3344 return gp, false, false
3345 }
3346 }
3347
3348
3349
3350
3351
3352
3353
3354
3355 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3356 if list, delta := netpoll(0); !list.empty() {
3357 gp := list.pop()
3358 injectglist(&list)
3359 netpollAdjustWaiters(delta)
3360 trace := traceAcquire()
3361 casgstatus(gp, _Gwaiting, _Grunnable)
3362 if trace.ok() {
3363 trace.GoUnpark(gp, 0)
3364 traceRelease(trace)
3365 }
3366 return gp, false, false
3367 }
3368 }
3369
3370
3371
3372
3373
3374
3375 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3376 if !mp.spinning {
3377 mp.becomeSpinning()
3378 }
3379
3380 gp, inheritTime, tnow, w, newWork := stealWork(now)
3381 if gp != nil {
3382
3383 return gp, inheritTime, false
3384 }
3385 if newWork {
3386
3387
3388 goto top
3389 }
3390
3391 now = tnow
3392 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3393
3394 pollUntil = w
3395 }
3396 }
3397
3398
3399
3400
3401
3402 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3403 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3404 if node != nil {
3405 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3406 gp := node.gp.ptr()
3407
3408 trace := traceAcquire()
3409 casgstatus(gp, _Gwaiting, _Grunnable)
3410 if trace.ok() {
3411 trace.GoUnpark(gp, 0)
3412 traceRelease(trace)
3413 }
3414 return gp, false, false
3415 }
3416 gcController.removeIdleMarkWorker()
3417 }
3418
3419
3420
3421
3422
3423 gp, otherReady := beforeIdle(now, pollUntil)
3424 if gp != nil {
3425 trace := traceAcquire()
3426 casgstatus(gp, _Gwaiting, _Grunnable)
3427 if trace.ok() {
3428 trace.GoUnpark(gp, 0)
3429 traceRelease(trace)
3430 }
3431 return gp, false, false
3432 }
3433 if otherReady {
3434 goto top
3435 }
3436
3437
3438
3439
3440
3441 allpSnapshot := allp
3442
3443
3444 idlepMaskSnapshot := idlepMask
3445 timerpMaskSnapshot := timerpMask
3446
3447
3448 lock(&sched.lock)
3449 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3450 unlock(&sched.lock)
3451 goto top
3452 }
3453 if sched.runqsize != 0 {
3454 gp := globrunqget(pp, 0)
3455 unlock(&sched.lock)
3456 return gp, false, false
3457 }
3458 if !mp.spinning && sched.needspinning.Load() == 1 {
3459
3460 mp.becomeSpinning()
3461 unlock(&sched.lock)
3462 goto top
3463 }
3464 if releasep() != pp {
3465 throw("findrunnable: wrong p")
3466 }
3467 now = pidleput(pp, now)
3468 unlock(&sched.lock)
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506 wasSpinning := mp.spinning
3507 if mp.spinning {
3508 mp.spinning = false
3509 if sched.nmspinning.Add(-1) < 0 {
3510 throw("findrunnable: negative nmspinning")
3511 }
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524 lock(&sched.lock)
3525 if sched.runqsize != 0 {
3526 pp, _ := pidlegetSpinning(0)
3527 if pp != nil {
3528 gp := globrunqget(pp, 0)
3529 if gp == nil {
3530 throw("global runq empty with non-zero runqsize")
3531 }
3532 unlock(&sched.lock)
3533 acquirep(pp)
3534 mp.becomeSpinning()
3535 return gp, false, false
3536 }
3537 }
3538 unlock(&sched.lock)
3539
3540 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3541 if pp != nil {
3542 acquirep(pp)
3543 mp.becomeSpinning()
3544 goto top
3545 }
3546
3547
3548 pp, gp := checkIdleGCNoP()
3549 if pp != nil {
3550 acquirep(pp)
3551 mp.becomeSpinning()
3552
3553
3554 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3555 trace := traceAcquire()
3556 casgstatus(gp, _Gwaiting, _Grunnable)
3557 if trace.ok() {
3558 trace.GoUnpark(gp, 0)
3559 traceRelease(trace)
3560 }
3561 return gp, false, false
3562 }
3563
3564
3565
3566
3567
3568
3569
3570 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3571 }
3572
3573
3574 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3575 sched.pollUntil.Store(pollUntil)
3576 if mp.p != 0 {
3577 throw("findrunnable: netpoll with p")
3578 }
3579 if mp.spinning {
3580 throw("findrunnable: netpoll with spinning")
3581 }
3582 delay := int64(-1)
3583 if pollUntil != 0 {
3584 if now == 0 {
3585 now = nanotime()
3586 }
3587 delay = pollUntil - now
3588 if delay < 0 {
3589 delay = 0
3590 }
3591 }
3592 if faketime != 0 {
3593
3594 delay = 0
3595 }
3596 list, delta := netpoll(delay)
3597
3598 now = nanotime()
3599 sched.pollUntil.Store(0)
3600 sched.lastpoll.Store(now)
3601 if faketime != 0 && list.empty() {
3602
3603
3604 stopm()
3605 goto top
3606 }
3607 lock(&sched.lock)
3608 pp, _ := pidleget(now)
3609 unlock(&sched.lock)
3610 if pp == nil {
3611 injectglist(&list)
3612 netpollAdjustWaiters(delta)
3613 } else {
3614 acquirep(pp)
3615 if !list.empty() {
3616 gp := list.pop()
3617 injectglist(&list)
3618 netpollAdjustWaiters(delta)
3619 trace := traceAcquire()
3620 casgstatus(gp, _Gwaiting, _Grunnable)
3621 if trace.ok() {
3622 trace.GoUnpark(gp, 0)
3623 traceRelease(trace)
3624 }
3625 return gp, false, false
3626 }
3627 if wasSpinning {
3628 mp.becomeSpinning()
3629 }
3630 goto top
3631 }
3632 } else if pollUntil != 0 && netpollinited() {
3633 pollerPollUntil := sched.pollUntil.Load()
3634 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3635 netpollBreak()
3636 }
3637 }
3638 stopm()
3639 goto top
3640 }
3641
3642 // pollWork reports whether there is non-background work this P could
3643 // be doing with its current time slice. This is a fairly lightweight check
3644 // to be used for background work loops, like idle GC. It checks a subset
3645 // of the conditions checked by the actual scheduler.
3646 func pollWork() bool {
3647 if sched.runqsize != 0 {
3648 return true
3649 }
3650 p := getg().m.p.ptr()
3651 if !runqempty(p) {
3652 return true
3653 }
3654 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3655 if list, delta := netpoll(0); !list.empty() {
3656 injectglist(&list)
3657 netpollAdjustWaiters(delta)
3658 return true
3659 }
3660 }
3661 return false
3662 }
3663
3664 // stealWork attempts to steal a runnable goroutine or timer from any P.
3665 //
3666 // If newWork is true, new work may have been readied.
3667 //
3668 // If now is not 0 it is the current time. stealWork returns the passed time
3669 // or the current time if now was passed as 0.
3670 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3671 pp := getg().m.p.ptr()
3672
3673 ranTimer := false
3674
3675 const stealTries = 4
3676 for i := 0; i < stealTries; i++ {
3677 stealTimersOrRunNextG := i == stealTries-1
3678
3679 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3680 if sched.gcwaiting.Load() {
3681
3682 return nil, false, now, pollUntil, true
3683 }
3684 p2 := allp[enum.position()]
3685 if pp == p2 {
3686 continue
3687 }
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3703 tnow, w, ran := p2.timers.check(now)
3704 now = tnow
3705 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3706 pollUntil = w
3707 }
3708 if ran {
3709 // Running the timers may have
3710 // made an arbitrary number of G's
3711 // ready and added them to this P's
3712 // local run queue. That invalidates
3713 // the assumption of runqsteal
3714 // that it always has room to add
3715 // stolen G's. So check now if there
3716 // is a local G to run.
3717 if gp, inheritTime := runqget(pp); gp != nil {
3718 return gp, inheritTime, now, pollUntil, ranTimer
3719 }
3720 ranTimer = true
3721 }
3722 }
3723
3724
3725 if !idlepMask.read(enum.position()) {
3726 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3727 return gp, false, now, pollUntil, ranTimer
3728 }
3729 }
3730 }
3731 }
3732
3733
3734
3735
3736 return nil, false, now, pollUntil, ranTimer
3737 }
3738
3739 // Check all Ps for a runnable G to steal.
3740 //
3741 // On entry we have no P. If a G is available to steal and a P is available,
3742 // the P is returned which the caller should acquire and attempt to steal the
3743 // work to.
3744 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3745 for id, p2 := range allpSnapshot {
3746 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3747 lock(&sched.lock)
3748 pp, _ := pidlegetSpinning(0)
3749 if pp == nil {
3750
3751 unlock(&sched.lock)
3752 return nil
3753 }
3754 unlock(&sched.lock)
3755 return pp
3756 }
3757 }
3758
3759
3760 return nil
3761 }
3762
3763 // Check all Ps for a timer expiring sooner than pollUntil.
3764 //
3765 // Returns updated pollUntil value.
3766 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3767 for id, p2 := range allpSnapshot {
3768 if timerpMaskSnapshot.read(uint32(id)) {
3769 w := p2.timers.wakeTime()
3770 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3771 pollUntil = w
3772 }
3773 }
3774 }
3775
3776 return pollUntil
3777 }
3778
3779 // Check for idle-priority GC work, without a P on entry.
3780 //
3781 // If some GC work, a P, and a worker G are all available, the P and G will be
3782 // returned. The returned P has not been wired yet.
3783 func checkIdleGCNoP() (*p, *g) {
3784
3785
3786
3787
3788
3789
3790 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3791 return nil, nil
3792 }
3793 if !gcMarkWorkAvailable(nil) {
3794 return nil, nil
3795 }
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814 lock(&sched.lock)
3815 pp, now := pidlegetSpinning(0)
3816 if pp == nil {
3817 unlock(&sched.lock)
3818 return nil, nil
3819 }
3820
3821
3822 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3823 pidleput(pp, now)
3824 unlock(&sched.lock)
3825 return nil, nil
3826 }
3827
3828 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3829 if node == nil {
3830 pidleput(pp, now)
3831 unlock(&sched.lock)
3832 gcController.removeIdleMarkWorker()
3833 return nil, nil
3834 }
3835
3836 unlock(&sched.lock)
3837
3838 return pp, node.gp.ptr()
3839 }
3840
3841 // wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
3842 // going to wake up before the when argument; or it wakes an idle P to service
3843 // timers and the network poller if there isn't one already.
3844 func wakeNetPoller(when int64) {
3845 if sched.lastpoll.Load() == 0 {
3846
3847
3848
3849
3850 pollerPollUntil := sched.pollUntil.Load()
3851 if pollerPollUntil == 0 || pollerPollUntil > when {
3852 netpollBreak()
3853 }
3854 } else {
3855
3856
3857 if GOOS != "plan9" {
3858 wakep()
3859 }
3860 }
3861 }
3862
3863 func resetspinning() {
3864 gp := getg()
3865 if !gp.m.spinning {
3866 throw("resetspinning: not a spinning m")
3867 }
3868 gp.m.spinning = false
3869 nmspinning := sched.nmspinning.Add(-1)
3870 if nmspinning < 0 {
3871 throw("findrunnable: negative nmspinning")
3872 }
3873
3874
3875
3876 wakep()
3877 }
3878
3879 // injectglist adds each runnable G on the list to some run queue,
3880 // and clears glist. If there is no current P, they are added to the
3881 // global queue, and up to npidle M's are started to run them.
3882 // Otherwise, for each idle P, this adds a G to the global queue
3883 // and starts an M. Any remaining G's are added to the current P's
3884 // local run queue.
3885 // This may temporarily acquire sched.lock.
3886 // Can run concurrently with GC.
3887 func injectglist(glist *gList) {
3888 if glist.empty() {
3889 return
3890 }
3891 trace := traceAcquire()
3892 if trace.ok() {
3893 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3894 trace.GoUnpark(gp, 0)
3895 }
3896 traceRelease(trace)
3897 }
3898
3899
3900
3901 head := glist.head.ptr()
3902 var tail *g
3903 qsize := 0
3904 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3905 tail = gp
3906 qsize++
3907 casgstatus(gp, _Gwaiting, _Grunnable)
3908 }
3909
3910
3911 var q gQueue
3912 q.head.set(head)
3913 q.tail.set(tail)
3914 *glist = gList{}
3915
3916 startIdle := func(n int) {
3917 for i := 0; i < n; i++ {
3918 mp := acquirem()
3919 lock(&sched.lock)
3920
3921 pp, _ := pidlegetSpinning(0)
3922 if pp == nil {
3923 unlock(&sched.lock)
3924 releasem(mp)
3925 break
3926 }
3927
3928 startm(pp, false, true)
3929 unlock(&sched.lock)
3930 releasem(mp)
3931 }
3932 }
3933
3934 pp := getg().m.p.ptr()
3935 if pp == nil {
3936 lock(&sched.lock)
3937 globrunqputbatch(&q, int32(qsize))
3938 unlock(&sched.lock)
3939 startIdle(qsize)
3940 return
3941 }
3942
3943 npidle := int(sched.npidle.Load())
3944 var (
3945 globq gQueue
3946 n int
3947 )
3948 for n = 0; n < npidle && !q.empty(); n++ {
3949 g := q.pop()
3950 globq.pushBack(g)
3951 }
3952 if n > 0 {
3953 lock(&sched.lock)
3954 globrunqputbatch(&globq, int32(n))
3955 unlock(&sched.lock)
3956 startIdle(n)
3957 qsize -= n
3958 }
3959
3960 if !q.empty() {
3961 runqputbatch(pp, &q, qsize)
3962 }
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977 wakep()
3978 }
3979
3980 // One round of scheduler: find a runnable goroutine and execute it.
3981 // Never returns.
3982 func schedule() {
3983 mp := getg().m
3984
3985 if mp.locks != 0 {
3986 throw("schedule: holding locks")
3987 }
3988
3989 if mp.lockedg != 0 {
3990 stoplockedm()
3991 execute(mp.lockedg.ptr(), false)
3992 }
3993
3994
3995
3996 if mp.incgo {
3997 throw("schedule: in cgo")
3998 }
3999
4000 top:
4001 pp := mp.p.ptr()
4002 pp.preempt = false
4003
4004
4005
4006
4007 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4008 throw("schedule: spinning with local work")
4009 }
4010
4011 gp, inheritTime, tryWakeP := findRunnable()
4012
4013 if debug.dontfreezetheworld > 0 && freezing.Load() {
4014
4015
4016
4017
4018
4019
4020
4021 lock(&deadlock)
4022 lock(&deadlock)
4023 }
4024
4025
4026
4027
4028 if mp.spinning {
4029 resetspinning()
4030 }
4031
4032 if sched.disable.user && !schedEnabled(gp) {
4033
4034
4035
4036 lock(&sched.lock)
4037 if schedEnabled(gp) {
4038
4039
4040 unlock(&sched.lock)
4041 } else {
4042 sched.disable.runnable.pushBack(gp)
4043 sched.disable.n++
4044 unlock(&sched.lock)
4045 goto top
4046 }
4047 }
4048
4049
4050
4051 if tryWakeP {
4052 wakep()
4053 }
4054 if gp.lockedm != 0 {
4055
4056
4057 startlockedm(gp)
4058 goto top
4059 }
4060
4061 execute(gp, inheritTime)
4062 }
4063
4064 // dropg removes the association between m and the current goroutine m->curg (gp for short).
4065 // Typically a caller sets gp's status away from Grunning and then
4066 // immediately calls dropg to finish the job. The caller is also responsible
4067 // for arranging that gp will be restarted using ready at an
4068 // appropriate time. After calling dropg and arranging for gp to be
4069 // readied later, the caller can do other work but eventually should
4070 // call schedule to restart the scheduling of goroutines on this m.
4071 func dropg() {
4072 gp := getg()
4073
4074 setMNoWB(&gp.m.curg.m, nil)
4075 setGNoWB(&gp.m.curg, nil)
4076 }
4077
4078 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4079 unlock((*mutex)(lock))
4080 return true
4081 }
4082
4083 // park continuation on g0.
4084 func park_m(gp *g) {
4085 mp := getg().m
4086
4087 trace := traceAcquire()
4088
4089 if trace.ok() {
4090
4091
4092
4093 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4094 }
4095
4096
4097 casgstatus(gp, _Grunning, _Gwaiting)
4098 if trace.ok() {
4099 traceRelease(trace)
4100 }
4101
4102 dropg()
4103
4104 if fn := mp.waitunlockf; fn != nil {
4105 ok := fn(gp, mp.waitlock)
4106 mp.waitunlockf = nil
4107 mp.waitlock = nil
4108 if !ok {
4109 trace := traceAcquire()
4110 casgstatus(gp, _Gwaiting, _Grunnable)
4111 if trace.ok() {
4112 trace.GoUnpark(gp, 2)
4113 traceRelease(trace)
4114 }
4115 execute(gp, true)
4116 }
4117 }
4118 schedule()
4119 }
4120
4121 func goschedImpl(gp *g, preempted bool) {
4122 trace := traceAcquire()
4123 status := readgstatus(gp)
4124 if status&^_Gscan != _Grunning {
4125 dumpgstatus(gp)
4126 throw("bad g status")
4127 }
4128 if trace.ok() {
4129
4130
4131
4132 if preempted {
4133 trace.GoPreempt()
4134 } else {
4135 trace.GoSched()
4136 }
4137 }
4138 casgstatus(gp, _Grunning, _Grunnable)
4139 if trace.ok() {
4140 traceRelease(trace)
4141 }
4142
4143 dropg()
4144 lock(&sched.lock)
4145 globrunqput(gp)
4146 unlock(&sched.lock)
4147
4148 if mainStarted {
4149 wakep()
4150 }
4151
4152 schedule()
4153 }
4154
4155
4156 func gosched_m(gp *g) {
4157 goschedImpl(gp, false)
4158 }
4159
4160
4161 func goschedguarded_m(gp *g) {
4162 if !canPreemptM(gp.m) {
4163 gogo(&gp.sched)
4164 }
4165 goschedImpl(gp, false)
4166 }
4167
4168 func gopreempt_m(gp *g) {
4169 goschedImpl(gp, true)
4170 }
4171
4172 // preemptPark parks gp and puts it in _Gpreempted.
4173 //
4174 // Must run on the system stack.
4175 func preemptPark(gp *g) {
4176 status := readgstatus(gp)
4177 if status&^_Gscan != _Grunning {
4178 dumpgstatus(gp)
4179 throw("bad g status")
4180 }
4181
4182 if gp.asyncSafePoint {
4183
4184
4185
4186 f := findfunc(gp.sched.pc)
4187 if !f.valid() {
4188 throw("preempt at unknown pc")
4189 }
4190 if f.flag&abi.FuncFlagSPWrite != 0 {
4191 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4192 throw("preempt SPWRITE")
4193 }
4194 }
4195
4196
4197
4198
4199
4200
4201
4202 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4203 dropg()
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220 trace := traceAcquire()
4221 if trace.ok() {
4222 trace.GoPark(traceBlockPreempted, 0)
4223 }
4224 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4225 if trace.ok() {
4226 traceRelease(trace)
4227 }
4228 schedule()
4229 }
4230
4231 // goyield is like Gosched, but it:
4232 //   - emits a GoPreempt trace event instead of a GoSched trace event
4233 //   - puts the current G on the runq of the current P instead of the globrunq
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245 func goyield() {
4246 checkTimeouts()
4247 mcall(goyield_m)
4248 }
4249
4250 func goyield_m(gp *g) {
4251 trace := traceAcquire()
4252 pp := gp.m.p.ptr()
4253 if trace.ok() {
4254
4255
4256
4257 trace.GoPreempt()
4258 }
4259 casgstatus(gp, _Grunning, _Grunnable)
4260 if trace.ok() {
4261 traceRelease(trace)
4262 }
4263 dropg()
4264 runqput(pp, gp, false)
4265 schedule()
4266 }
4267
4268
4269 func goexit1() {
4270 if raceenabled {
4271 racegoend()
4272 }
4273 trace := traceAcquire()
4274 if trace.ok() {
4275 trace.GoEnd()
4276 traceRelease(trace)
4277 }
4278 mcall(goexit0)
4279 }
4280
4281
4282 func goexit0(gp *g) {
4283 gdestroy(gp)
4284 schedule()
4285 }
4286
4287 func gdestroy(gp *g) {
4288 mp := getg().m
4289 pp := mp.p.ptr()
4290
4291 casgstatus(gp, _Grunning, _Gdead)
4292 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4293 if isSystemGoroutine(gp, false) {
4294 sched.ngsys.Add(-1)
4295 }
4296 gp.m = nil
4297 locked := gp.lockedm != 0
4298 gp.lockedm = 0
4299 mp.lockedg = 0
4300 gp.preemptStop = false
4301 gp.paniconfault = false
4302 gp._defer = nil
4303 gp._panic = nil
4304 gp.writebuf = nil
4305 gp.waitreason = waitReasonZero
4306 gp.param = nil
4307 gp.labels = nil
4308 gp.timer = nil
4309
4310 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4311
4312
4313
4314 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4315 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4316 gcController.bgScanCredit.Add(scanCredit)
4317 gp.gcAssistBytes = 0
4318 }
4319
4320 dropg()
4321
4322 if GOARCH == "wasm" {
4323 gfput(pp, gp)
4324 return
4325 }
4326
4327 if locked && mp.lockedInt != 0 {
4328 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4329 if mp.isextra {
4330 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4331 }
4332 throw("exited a goroutine internally locked to the OS thread")
4333 }
4334 gfput(pp, gp)
4335 if locked {
4336
4337
4338
4339
4340
4341
4342 if GOOS != "plan9" {
4343 gogo(&mp.g0.sched)
4344 } else {
4345
4346
4347 mp.lockedExt = 0
4348 }
4349 }
4350 }
4351
4352 // save updates getg().sched to refer to pc and sp so that a following
4353 // gogo will restore pc/sp.
4354 //
4355 // save must not have write barriers because invoking a write barrier
4356 // can clobber getg().sched.
4357
4358
4359
4360 func save(pc, sp, bp uintptr) {
4361 gp := getg()
4362
4363 if gp == gp.m.g0 || gp == gp.m.gsignal {
4364
4365
4366
4367
4368
4369 throw("save on system g not allowed")
4370 }
4371
4372 gp.sched.pc = pc
4373 gp.sched.sp = sp
4374 gp.sched.lr = 0
4375 gp.sched.ret = 0
4376 gp.sched.bp = bp
4377
4378
4379
4380 if gp.sched.ctxt != nil {
4381 badctxt()
4382 }
4383 }
4384
4385 // reentersyscall is the common syscall-entry path used by the go syscall
4386 // library and by cgocall: the goroutine is about to enter a system call,
4387 // so record that it is no longer using the CPU and move its P to _Psyscall.
4388 //
4389 // It must not split the stack: gp.sched has to describe the caller's frame,
4390 // because reentersyscall returns immediately afterwards, and nothing it calls
4391 // may grow or move the stack while the syscall arguments are live as raw
4392 // uintptr values.
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409 func reentersyscall(pc, sp, bp uintptr) {
4410 trace := traceAcquire()
4411 gp := getg()
4412
4413
4414
4415 gp.m.locks++
4416
4417
4418
4419
4420
4421 gp.stackguard0 = stackPreempt
4422 gp.throwsplit = true
4423
4424
4425 save(pc, sp, bp)
4426 gp.syscallsp = sp
4427 gp.syscallpc = pc
4428 gp.syscallbp = bp
4429 casgstatus(gp, _Grunning, _Gsyscall)
4430 if staticLockRanking {
4431
4432
4433 save(pc, sp, bp)
4434 }
4435 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4436 systemstack(func() {
4437 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4438 throw("entersyscall")
4439 })
4440 }
4441 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4442 systemstack(func() {
4443 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4444 throw("entersyscall")
4445 })
4446 }
4447
4448 if trace.ok() {
4449 systemstack(func() {
4450 trace.GoSysCall()
4451 traceRelease(trace)
4452 })
4453
4454
4455
4456 save(pc, sp, bp)
4457 }
4458
4459 if sched.sysmonwait.Load() {
4460 systemstack(entersyscall_sysmon)
4461 save(pc, sp, bp)
4462 }
4463
4464 if gp.m.p.ptr().runSafePointFn != 0 {
4465
4466 systemstack(runSafePointFn)
4467 save(pc, sp, bp)
4468 }
4469
4470 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4471 pp := gp.m.p.ptr()
4472 pp.m = 0
4473 gp.m.oldp.set(pp)
4474 gp.m.p = 0
4475 atomic.Store(&pp.status, _Psyscall)
4476 if sched.gcwaiting.Load() {
4477 systemstack(entersyscall_gcwait)
4478 save(pc, sp, bp)
4479 }
4480
4481 gp.m.locks--
4482 }
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498 func entersyscall() {
4499
4500
4501
4502
4503 fp := getcallerfp()
4504 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4505 }
4506
4507 func entersyscall_sysmon() {
4508 lock(&sched.lock)
4509 if sched.sysmonwait.Load() {
4510 sched.sysmonwait.Store(false)
4511 notewakeup(&sched.sysmonnote)
4512 }
4513 unlock(&sched.lock)
4514 }
4515
4516 func entersyscall_gcwait() {
4517 gp := getg()
4518 pp := gp.m.oldp.ptr()
4519
4520 lock(&sched.lock)
4521 trace := traceAcquire()
4522 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4523 if trace.ok() {
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533 trace.ProcSteal(pp, true)
4534 traceRelease(trace)
4535 }
4536 pp.gcStopTime = nanotime()
4537 pp.syscalltick++
4538 if sched.stopwait--; sched.stopwait == 0 {
4539 notewakeup(&sched.stopnote)
4540 }
4541 } else if trace.ok() {
4542 traceRelease(trace)
4543 }
4544 unlock(&sched.lock)
4545 }
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559 func entersyscallblock() {
4560 gp := getg()
4561
4562 gp.m.locks++
4563 gp.throwsplit = true
4564 gp.stackguard0 = stackPreempt
4565 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4566 gp.m.p.ptr().syscalltick++
4567
4568
4569 pc := sys.GetCallerPC()
4570 sp := sys.GetCallerSP()
4571 bp := getcallerfp()
4572 save(pc, sp, bp)
4573 gp.syscallsp = gp.sched.sp
4574 gp.syscallpc = gp.sched.pc
4575 gp.syscallbp = gp.sched.bp
4576 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4577 sp1 := sp
4578 sp2 := gp.sched.sp
4579 sp3 := gp.syscallsp
4580 systemstack(func() {
4581 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4582 throw("entersyscallblock")
4583 })
4584 }
4585 casgstatus(gp, _Grunning, _Gsyscall)
4586 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4587 systemstack(func() {
4588 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4589 throw("entersyscallblock")
4590 })
4591 }
4592 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4593 systemstack(func() {
4594 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4595 throw("entersyscallblock")
4596 })
4597 }
4598
4599 systemstack(entersyscallblock_handoff)
4600
4601
4602 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4603
4604 gp.m.locks--
4605 }
4606
4607 func entersyscallblock_handoff() {
4608 trace := traceAcquire()
4609 if trace.ok() {
4610 trace.GoSysCall()
4611 traceRelease(trace)
4612 }
4613 handoffp(releasep())
4614 }
4615
4616
4617 // exitsyscall is called when the goroutine g exits its system call.
4618 // Arrange for it to run on a cpu again.
4619 // This is called only from the go syscall library, not
4620 // from the low-level system calls used by the runtime.
4621 //
4622 // Write barriers are not allowed because our P may have been stolen.
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636 func exitsyscall() {
4637 gp := getg()
4638
4639 gp.m.locks++
4640 if sys.GetCallerSP() > gp.syscallsp {
4641 throw("exitsyscall: syscall frame is no longer valid")
4642 }
4643
4644 gp.waitsince = 0
4645 oldp := gp.m.oldp.ptr()
4646 gp.m.oldp = 0
4647 if exitsyscallfast(oldp) {
4648
4649
4650 if goroutineProfile.active {
4651
4652
4653
4654 systemstack(func() {
4655 tryRecordGoroutineProfileWB(gp)
4656 })
4657 }
4658 trace := traceAcquire()
4659 if trace.ok() {
4660 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4661 systemstack(func() {
4662
4663
4664
4665
4666 trace.GoSysExit(lostP)
4667 if lostP {
4668
4669
4670
4671
4672 trace.GoStart()
4673 }
4674 })
4675 }
4676
4677 gp.m.p.ptr().syscalltick++
4678
4679 casgstatus(gp, _Gsyscall, _Grunning)
4680 if trace.ok() {
4681 traceRelease(trace)
4682 }
4683
4684
4685
4686 gp.syscallsp = 0
4687 gp.m.locks--
4688 if gp.preempt {
4689
4690 gp.stackguard0 = stackPreempt
4691 } else {
4692
4693 gp.stackguard0 = gp.stack.lo + stackGuard
4694 }
4695 gp.throwsplit = false
4696
4697 if sched.disable.user && !schedEnabled(gp) {
4698
4699 Gosched()
4700 }
4701
4702 return
4703 }
4704
4705 gp.m.locks--
4706
4707
4708 mcall(exitsyscall0)
4709
4710
4711
4712
4713
4714
4715
4716 gp.syscallsp = 0
4717 gp.m.p.ptr().syscalltick++
4718 gp.throwsplit = false
4719 }
4720
4721
4722 func exitsyscallfast(oldp *p) bool {
4723
4724 if sched.stopwait == freezeStopWait {
4725 return false
4726 }
4727
4728
4729 trace := traceAcquire()
4730 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4731
4732 wirep(oldp)
4733 exitsyscallfast_reacquired(trace)
4734 if trace.ok() {
4735 traceRelease(trace)
4736 }
4737 return true
4738 }
4739 if trace.ok() {
4740 traceRelease(trace)
4741 }
4742
4743
4744 if sched.pidle != 0 {
4745 var ok bool
4746 systemstack(func() {
4747 ok = exitsyscallfast_pidle()
4748 })
4749 if ok {
4750 return true
4751 }
4752 }
4753 return false
4754 }
4755
4756
4757
4758
4759
4760
4761 func exitsyscallfast_reacquired(trace traceLocker) {
4762 gp := getg()
4763 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4764 if trace.ok() {
4765
4766
4767
4768 systemstack(func() {
4769
4770
4771 trace.ProcSteal(gp.m.p.ptr(), true)
4772 trace.ProcStart()
4773 })
4774 }
4775 gp.m.p.ptr().syscalltick++
4776 }
4777 }
4778
4779 func exitsyscallfast_pidle() bool {
4780 lock(&sched.lock)
4781 pp, _ := pidleget(0)
4782 if pp != nil && sched.sysmonwait.Load() {
4783 sched.sysmonwait.Store(false)
4784 notewakeup(&sched.sysmonnote)
4785 }
4786 unlock(&sched.lock)
4787 if pp != nil {
4788 acquirep(pp)
4789 return true
4790 }
4791 return false
4792 }
4793
4794
4795
4796
4797
4798
4799
4800 func exitsyscall0(gp *g) {
4801 var trace traceLocker
4802 traceExitingSyscall()
4803 trace = traceAcquire()
4804 casgstatus(gp, _Gsyscall, _Grunnable)
4805 traceExitedSyscall()
4806 if trace.ok() {
4807
4808
4809
4810
4811 trace.GoSysExit(true)
4812 traceRelease(trace)
4813 }
4814 dropg()
4815 lock(&sched.lock)
4816 var pp *p
4817 if schedEnabled(gp) {
4818 pp, _ = pidleget(0)
4819 }
4820 var locked bool
4821 if pp == nil {
4822 globrunqput(gp)
4823
4824
4825
4826
4827
4828
4829 locked = gp.lockedm != 0
4830 } else if sched.sysmonwait.Load() {
4831 sched.sysmonwait.Store(false)
4832 notewakeup(&sched.sysmonnote)
4833 }
4834 unlock(&sched.lock)
4835 if pp != nil {
4836 acquirep(pp)
4837 execute(gp, false)
4838 }
4839 if locked {
4840
4841
4842
4843
4844 stoplockedm()
4845 execute(gp, false)
4846 }
4847 stopm()
4848 schedule()
4849 }
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863 func syscall_runtime_BeforeFork() {
4864 gp := getg().m.curg
4865
4866
4867
4868
4869 gp.m.locks++
4870 sigsave(&gp.m.sigmask)
4871 sigblock(false)
4872
4873
4874
4875
4876
4877 gp.stackguard0 = stackFork
4878 }
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
4891
4892 func syscall_runtime_AfterFork() {
4893 gp := getg().m.curg
4894
4895
4896 gp.stackguard0 = gp.stack.lo + stackGuard
4897
4898 msigrestore(gp.m.sigmask)
4899
4900 gp.m.locks--
4901 }
4902
4903
4904
4905 var inForkedChild bool
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926 func syscall_runtime_AfterForkInChild() {
4927
4928
4929
4930
4931 inForkedChild = true
4932
4933 clearSignalHandlers()
4934
4935
4936
4937 msigrestore(getg().m.sigmask)
4938
4939 inForkedChild = false
4940 }
4941
4942
4943
4944
4945 var pendingPreemptSignals atomic.Int32
4946
4947
4948
4949
4950 func syscall_runtime_BeforeExec() {
4951
4952 execLock.lock()
4953
4954
4955
4956 if GOOS == "darwin" || GOOS == "ios" {
4957 for pendingPreemptSignals.Load() > 0 {
4958 osyield()
4959 }
4960 }
4961 }
4962
4963
4964
4965
4966 func syscall_runtime_AfterExec() {
4967 execLock.unlock()
4968 }
4969
4970 // Allocate a new g, with a stack big enough for stacksize bytes.
4971 func malg(stacksize int32) *g {
4972 newg := new(g)
4973 if stacksize >= 0 {
4974 stacksize = round2(stackSystem + stacksize)
4975 systemstack(func() {
4976 newg.stack = stackalloc(uint32(stacksize))
4977 })
4978 newg.stackguard0 = newg.stack.lo + stackGuard
4979 newg.stackguard1 = ^uintptr(0)
4980
4981
4982 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4983 }
4984 return newg
4985 }
4986
4987 // Create a new g running fn.
4988 // Put it on the queue of g's waiting to run.
4989 // The compiler turns a go statement into a call to this.
4990 func newproc(fn *funcval) {
4991 gp := getg()
4992 pc := sys.GetCallerPC()
4993 systemstack(func() {
4994 newg := newproc1(fn, gp, pc, false, waitReasonZero)
4995
4996 pp := getg().m.p.ptr()
4997 runqput(pp, newg, true)
4998
4999 if mainStarted {
5000 wakep()
5001 }
5002 })
5003 }
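// Illustrative sketch (editor's note, not from the original source): the
// compiler lowers a go statement into a call to newproc with a *funcval
// describing the function to run. Assuming a hypothetical user function work,
//
//	go work(1, 2)
//
// compiles to roughly the equivalent of
//
//	newproc(closureCapturing1And2)
//
// where the argument values are captured in a closure rather than passed
// through newproc itself (closureCapturing1And2 is an illustrative name).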
5004
5005 // Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn.
5006 // callerpc is the address of the go statement that created this. The caller is responsible
5007 // for adding the new g to the scheduler. If parked is true, waitreason must be non-zero.
5008 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5009 if fn == nil {
5010 fatal("go of nil func value")
5011 }
5012
5013 mp := acquirem()
5014 pp := mp.p.ptr()
5015 newg := gfget(pp)
5016 if newg == nil {
5017 newg = malg(stackMin)
5018 casgstatus(newg, _Gidle, _Gdead)
5019 allgadd(newg)
5020 }
5021 if newg.stack.hi == 0 {
5022 throw("newproc1: newg missing stack")
5023 }
5024
5025 if readgstatus(newg) != _Gdead {
5026 throw("newproc1: new g is not Gdead")
5027 }
5028
5029 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5030 totalSize = alignUp(totalSize, sys.StackAlign)
5031 sp := newg.stack.hi - totalSize
5032 if usesLR {
5033
5034 *(*uintptr)(unsafe.Pointer(sp)) = 0
5035 prepGoExitFrame(sp)
5036 }
5037 if GOARCH == "arm64" {
5038
5039 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5040 }
5041
5042 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5043 newg.sched.sp = sp
5044 newg.stktopsp = sp
5045 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5046 newg.sched.g = guintptr(unsafe.Pointer(newg))
5047 gostartcallfn(&newg.sched, fn)
5048 newg.parentGoid = callergp.goid
5049 newg.gopc = callerpc
5050 newg.ancestors = saveAncestors(callergp)
5051 newg.startpc = fn.fn
5052 if isSystemGoroutine(newg, false) {
5053 sched.ngsys.Add(1)
5054 } else {
5055
5056 if mp.curg != nil {
5057 newg.labels = mp.curg.labels
5058 }
5059 if goroutineProfile.active {
5060
5061
5062
5063
5064
5065 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5066 }
5067 }
5068
5069 newg.trackingSeq = uint8(cheaprand())
5070 if newg.trackingSeq%gTrackingPeriod == 0 {
5071 newg.tracking = true
5072 }
5073 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5074
5075
5076 trace := traceAcquire()
5077 var status uint32 = _Grunnable
5078 if parked {
5079 status = _Gwaiting
5080 newg.waitreason = waitreason
5081 }
5082 casgstatus(newg, _Gdead, status)
5083 if pp.goidcache == pp.goidcacheend {
5084
5085
5086
5087 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5088 pp.goidcache -= _GoidCacheBatch - 1
5089 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5090 }
5091 newg.goid = pp.goidcache
5092 pp.goidcache++
5093 newg.trace.reset()
5094 if trace.ok() {
5095 trace.GoCreate(newg, newg.startpc, parked)
5096 traceRelease(trace)
5097 }
5098
5099
5100 if raceenabled {
5101 newg.racectx = racegostart(callerpc)
5102 newg.raceignore = 0
5103 if newg.labels != nil {
5104
5105
5106 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5107 }
5108 }
5109 releasem(mp)
5110
5111 return newg
5112 }
5113
5114 // saveAncestors copies previous ancestors of the given caller g and
5115 // includes info for the current caller into a new set of tracebacks for
5116 // a g being created.
5117 func saveAncestors(callergp *g) *[]ancestorInfo {
5118
5119 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5120 return nil
5121 }
5122 var callerAncestors []ancestorInfo
5123 if callergp.ancestors != nil {
5124 callerAncestors = *callergp.ancestors
5125 }
5126 n := int32(len(callerAncestors)) + 1
5127 if n > debug.tracebackancestors {
5128 n = debug.tracebackancestors
5129 }
5130 ancestors := make([]ancestorInfo, n)
5131 copy(ancestors[1:], callerAncestors)
5132
5133 var pcs [tracebackInnerFrames]uintptr
5134 npcs := gcallers(callergp, 0, pcs[:])
5135 ipcs := make([]uintptr, npcs)
5136 copy(ipcs, pcs[:])
5137 ancestors[0] = ancestorInfo{
5138 pcs: ipcs,
5139 goid: callergp.goid,
5140 gopc: callergp.gopc,
5141 }
5142
5143 ancestorsp := new([]ancestorInfo)
5144 *ancestorsp = ancestors
5145 return ancestorsp
5146 }
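// Usage sketch (editor's note): ancestor tracebacks are only recorded when the
// tracebackancestors GODEBUG setting is non-zero, e.g.
//
//	GODEBUG=tracebackancestors=5 ./myprog
//
// which extends goroutine tracebacks with the creation stacks of up to 5
// generations of ancestor goroutines. The program name above is a placeholder.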
5147
5148 // Put on gfree list.
5149 // If local list is too long, transfer a batch to the global list.
5150 func gfput(pp *p, gp *g) {
5151 if readgstatus(gp) != _Gdead {
5152 throw("gfput: bad status (not Gdead)")
5153 }
5154
5155 stksize := gp.stack.hi - gp.stack.lo
5156
5157 if stksize != uintptr(startingStackSize) {
5158
5159 stackfree(gp.stack)
5160 gp.stack.lo = 0
5161 gp.stack.hi = 0
5162 gp.stackguard0 = 0
5163 }
5164
5165 pp.gFree.push(gp)
5166 pp.gFree.n++
5167 if pp.gFree.n >= 64 {
5168 var (
5169 inc int32
5170 stackQ gQueue
5171 noStackQ gQueue
5172 )
5173 for pp.gFree.n >= 32 {
5174 gp := pp.gFree.pop()
5175 pp.gFree.n--
5176 if gp.stack.lo == 0 {
5177 noStackQ.push(gp)
5178 } else {
5179 stackQ.push(gp)
5180 }
5181 inc++
5182 }
5183 lock(&sched.gFree.lock)
5184 sched.gFree.noStack.pushAll(noStackQ)
5185 sched.gFree.stack.pushAll(stackQ)
5186 sched.gFree.n += inc
5187 unlock(&sched.gFree.lock)
5188 }
5189 }
5190
5191 // Get from gfree list.
5192 // If local list is empty, grab a batch from global list.
5193 func gfget(pp *p) *g {
5194 retry:
5195 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5196 lock(&sched.gFree.lock)
5197
5198 for pp.gFree.n < 32 {
5199
5200 gp := sched.gFree.stack.pop()
5201 if gp == nil {
5202 gp = sched.gFree.noStack.pop()
5203 if gp == nil {
5204 break
5205 }
5206 }
5207 sched.gFree.n--
5208 pp.gFree.push(gp)
5209 pp.gFree.n++
5210 }
5211 unlock(&sched.gFree.lock)
5212 goto retry
5213 }
5214 gp := pp.gFree.pop()
5215 if gp == nil {
5216 return nil
5217 }
5218 pp.gFree.n--
5219 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5220
5221
5222
5223 systemstack(func() {
5224 stackfree(gp.stack)
5225 gp.stack.lo = 0
5226 gp.stack.hi = 0
5227 gp.stackguard0 = 0
5228 })
5229 }
5230 if gp.stack.lo == 0 {
5231
5232 systemstack(func() {
5233 gp.stack = stackalloc(startingStackSize)
5234 })
5235 gp.stackguard0 = gp.stack.lo + stackGuard
5236 } else {
5237 if raceenabled {
5238 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5239 }
5240 if msanenabled {
5241 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5242 }
5243 if asanenabled {
5244 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5245 }
5246 }
5247 return gp
5248 }
5249
5250
5251 func gfpurge(pp *p) {
5252 var (
5253 inc int32
5254 stackQ gQueue
5255 noStackQ gQueue
5256 )
5257 for !pp.gFree.empty() {
5258 gp := pp.gFree.pop()
5259 pp.gFree.n--
5260 if gp.stack.lo == 0 {
5261 noStackQ.push(gp)
5262 } else {
5263 stackQ.push(gp)
5264 }
5265 inc++
5266 }
5267 lock(&sched.gFree.lock)
5268 sched.gFree.noStack.pushAll(noStackQ)
5269 sched.gFree.stack.pushAll(stackQ)
5270 sched.gFree.n += inc
5271 unlock(&sched.gFree.lock)
5272 }
5273
5274
5275 func Breakpoint() {
5276 breakpoint()
5277 }
5278
5279
5280
5281
5282
5283
5284 func dolockOSThread() {
5285 if GOARCH == "wasm" {
5286 return
5287 }
5288 gp := getg()
5289 gp.m.lockedg.set(gp)
5290 gp.lockedm.set(gp.m)
5291 }
5292
5293
5294
5295 // LockOSThread wires the calling goroutine to its current operating system thread.
5296 // The calling goroutine will always execute in that thread,
5297 // and no other goroutine will execute in it,
5298 // until the calling goroutine has made as many calls to
5299 // UnlockOSThread as to LockOSThread.
5300 // If the calling goroutine exits without unlocking the thread,
5301 // the thread will be terminated.
5302 //
5303 // All init functions are run on the startup thread. Calling LockOSThread
5304 // from an init function will cause the main function to be invoked on
5305 // that thread.
5306 //
5307 // A goroutine should call LockOSThread before calling OS services or
5308 // non-Go library functions that depend on per-thread state.
5309 func LockOSThread() {
5310 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5311
5312
5313
5314 startTemplateThread()
5315 }
5316 gp := getg()
5317 gp.m.lockedExt++
5318 if gp.m.lockedExt == 0 {
5319 gp.m.lockedExt--
5320 panic("LockOSThread nesting overflow")
5321 }
5322 dolockOSThread()
5323 }
5324
5325
5326 func lockOSThread() {
5327 getg().m.lockedInt++
5328 dolockOSThread()
5329 }
5330
5331
5332
5333
5334
5335
5336 func dounlockOSThread() {
5337 if GOARCH == "wasm" {
5338 return
5339 }
5340 gp := getg()
5341 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5342 return
5343 }
5344 gp.m.lockedg = 0
5345 gp.lockedm = 0
5346 }
5347
5348
5349
5350 // UnlockOSThread undoes an earlier call to LockOSThread.
5351 // If this drops the number of active LockOSThread calls on the
5352 // calling goroutine to zero, it unwires the calling goroutine from
5353 // its fixed operating system thread.
5354 // If there are no active LockOSThread calls, this is a no-op.
5355 //
5356 // Before calling UnlockOSThread, the caller must ensure that the OS
5357 // thread is suitable for running other goroutines. If the caller made
5358 // any permanent changes to the state of the thread that would affect
5359 // other goroutines, it should not call this function and thus leave
5360 // the goroutine locked to the OS thread until the goroutine (and
5361 // hence the thread) exits.
5362 func UnlockOSThread() {
5363 gp := getg()
5364 if gp.m.lockedExt == 0 {
5365 return
5366 }
5367 gp.m.lockedExt--
5368 dounlockOSThread()
5369 }
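// Minimal usage sketch (editor's note, hypothetical caller code): a goroutine
// that needs thread-local OS or C state pins itself to one thread for the
// duration of the work.
//
//	func runOnDedicatedThread(f func()) {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		f() // f observes a single, stable OS thread
//	}
//
// runOnDedicatedThread is an illustrative name, not part of the runtime.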
5370
5371
5372 func unlockOSThread() {
5373 gp := getg()
5374 if gp.m.lockedInt == 0 {
5375 systemstack(badunlockosthread)
5376 }
5377 gp.m.lockedInt--
5378 dounlockOSThread()
5379 }
5380
5381 func badunlockosthread() {
5382 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5383 }
5384
5385 func gcount() int32 {
5386 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5387 for _, pp := range allp {
5388 n -= pp.gFree.n
5389 }
5390
5391
5392
5393 if n < 1 {
5394 n = 1
5395 }
5396 return n
5397 }
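// Editor's note: gcount backs the exported runtime.NumGoroutine, so user code
// observes this count as, for example:
//
//	fmt.Println("goroutines:", runtime.NumGoroutine())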
5398
5399 func mcount() int32 {
5400 return int32(sched.mnext - sched.nmfreed)
5401 }
5402
5403 var prof struct {
5404 signalLock atomic.Uint32
5405
5406
5407
5408 hz atomic.Int32
5409 }
5410
5411 func _System() { _System() }
5412 func _ExternalCode() { _ExternalCode() }
5413 func _LostExternalCode() { _LostExternalCode() }
5414 func _GC() { _GC() }
5415 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5416 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5417 func _VDSO() { _VDSO() }
5418
5419 // sigprof is called if we receive a SIGPROF signal.
5420 // It is called by the signal handler and may run during STW.
5421
5422
5423 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5424 if prof.hz.Load() == 0 {
5425 return
5426 }
5427
5428
5429
5430
5431 if mp != nil && mp.profilehz == 0 {
5432 return
5433 }
5434
5435
5436
5437
5438
5439
5440
5441 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5442 if f := findfunc(pc); f.valid() {
5443 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5444 cpuprof.lostAtomic++
5445 return
5446 }
5447 }
5448 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5449
5450
5451
5452 cpuprof.lostAtomic++
5453 return
5454 }
5455 }
5456
5457
5458
5459
5460
5461
5462
5463 getg().m.mallocing++
5464
5465 var u unwinder
5466 var stk [maxCPUProfStack]uintptr
5467 n := 0
5468 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5469 cgoOff := 0
5470
5471
5472
5473
5474
5475 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5476 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5477 cgoOff++
5478 }
5479 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5480 mp.cgoCallers[0] = 0
5481 }
5482
5483
5484 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5485 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5486
5487
5488 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5489 } else if mp != nil && mp.vdsoSP != 0 {
5490
5491
5492 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5493 } else {
5494 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5495 }
5496 n += tracebackPCs(&u, 0, stk[n:])
5497
5498 if n <= 0 {
5499
5500
5501 n = 2
5502 if inVDSOPage(pc) {
5503 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5504 } else if pc > firstmoduledata.etext {
5505
5506 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5507 }
5508 stk[0] = pc
5509 if mp.preemptoff != "" {
5510 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5511 } else {
5512 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5513 }
5514 }
5515
5516 if prof.hz.Load() != 0 {
5517
5518
5519
5520 var tagPtr *unsafe.Pointer
5521 if gp != nil && gp.m != nil && gp.m.curg != nil {
5522 tagPtr = &gp.m.curg.labels
5523 }
5524 cpuprof.add(tagPtr, stk[:n])
5525
5526 gprof := gp
5527 var mp *m
5528 var pp *p
5529 if gp != nil && gp.m != nil {
5530 if gp.m.curg != nil {
5531 gprof = gp.m.curg
5532 }
5533 mp = gp.m
5534 pp = gp.m.p.ptr()
5535 }
5536 traceCPUSample(gprof, mp, pp, stk[:n])
5537 }
5538 getg().m.mallocing--
5539 }
5540
5541 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
5542 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
5543 func setcpuprofilerate(hz int32) {
5544
5545 if hz < 0 {
5546 hz = 0
5547 }
5548
5549
5550
5551 gp := getg()
5552 gp.m.locks++
5553
5554
5555
5556
5557 setThreadCPUProfiler(0)
5558
5559 for !prof.signalLock.CompareAndSwap(0, 1) {
5560 osyield()
5561 }
5562 if prof.hz.Load() != hz {
5563 setProcessCPUProfiler(hz)
5564 prof.hz.Store(hz)
5565 }
5566 prof.signalLock.Store(0)
5567
5568 lock(&sched.lock)
5569 sched.profilehz = hz
5570 unlock(&sched.lock)
5571
5572 if hz != 0 {
5573 setThreadCPUProfiler(hz)
5574 }
5575
5576 gp.m.locks--
5577 }
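// Editor's note: this is the runtime half of CPU profiling configuration. The
// public entry points are runtime.SetCPUProfileRate and, more commonly,
// runtime/pprof, e.g.
//
//	f, _ := os.Create("cpu.out") // sketch; handle errors in real code
//	pprof.StartCPUProfile(f)     // defaults to a 100 Hz sampling rate
//	defer pprof.StopCPUProfile()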
5578
5579 // init initializes pp, which may be a freshly allocated p or a
5580 // previously destroyed p, and transitions it to status _Pgcstop.
5581 func (pp *p) init(id int32) {
5582 pp.id = id
5583 pp.status = _Pgcstop
5584 pp.sudogcache = pp.sudogbuf[:0]
5585 pp.deferpool = pp.deferpoolbuf[:0]
5586 pp.wbBuf.reset()
5587 if pp.mcache == nil {
5588 if id == 0 {
5589 if mcache0 == nil {
5590 throw("missing mcache?")
5591 }
5592
5593
5594 pp.mcache = mcache0
5595 } else {
5596 pp.mcache = allocmcache()
5597 }
5598 }
5599 if raceenabled && pp.raceprocctx == 0 {
5600 if id == 0 {
5601 pp.raceprocctx = raceprocctx0
5602 raceprocctx0 = 0
5603 } else {
5604 pp.raceprocctx = raceproccreate()
5605 }
5606 }
5607 lockInit(&pp.timers.mu, lockRankTimers)
5608
5609
5610
5611 timerpMask.set(id)
5612
5613
5614 idlepMask.clear(id)
5615 }
5616
5617 // destroy releases all of the resources associated with pp and
5618 // transitions it to status _Pdead.
5619 //
5620 // sched.lock must be held and the world must be stopped.
5621 func (pp *p) destroy() {
5622 assertLockHeld(&sched.lock)
5623 assertWorldStopped()
5624
5625
5626 for pp.runqhead != pp.runqtail {
5627
5628 pp.runqtail--
5629 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5630
5631 globrunqputhead(gp)
5632 }
5633 if pp.runnext != 0 {
5634 globrunqputhead(pp.runnext.ptr())
5635 pp.runnext = 0
5636 }
5637
5638
5639 getg().m.p.ptr().timers.take(&pp.timers)
5640
5641
5642 if gcphase != _GCoff {
5643 wbBufFlush1(pp)
5644 pp.gcw.dispose()
5645 }
5646 for i := range pp.sudogbuf {
5647 pp.sudogbuf[i] = nil
5648 }
5649 pp.sudogcache = pp.sudogbuf[:0]
5650 pp.pinnerCache = nil
5651 for j := range pp.deferpoolbuf {
5652 pp.deferpoolbuf[j] = nil
5653 }
5654 pp.deferpool = pp.deferpoolbuf[:0]
5655 systemstack(func() {
5656 for i := 0; i < pp.mspancache.len; i++ {
5657
5658 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5659 }
5660 pp.mspancache.len = 0
5661 lock(&mheap_.lock)
5662 pp.pcache.flush(&mheap_.pages)
5663 unlock(&mheap_.lock)
5664 })
5665 freemcache(pp.mcache)
5666 pp.mcache = nil
5667 gfpurge(pp)
5668 if raceenabled {
5669 if pp.timers.raceCtx != 0 {
5670
5671
5672
5673
5674
5675 mp := getg().m
5676 phold := mp.p.ptr()
5677 mp.p.set(pp)
5678
5679 racectxend(pp.timers.raceCtx)
5680 pp.timers.raceCtx = 0
5681
5682 mp.p.set(phold)
5683 }
5684 raceprocdestroy(pp.raceprocctx)
5685 pp.raceprocctx = 0
5686 }
5687 pp.gcAssistTime = 0
5688 pp.status = _Pdead
5689 }
5690
5691 // Change number of processors.
5692 //
5693 // sched.lock must be held, and the world must be stopped.
5694 //
5695 // gcworkbufs must not be being modified by either the GC or the write barrier
5696 // code, so the GC must not be running if the number of Ps actually changes.
5697 //
5698 // Returns list of Ps with local work, they need to be scheduled by the caller.
5699 func procresize(nprocs int32) *p {
5700 assertLockHeld(&sched.lock)
5701 assertWorldStopped()
5702
5703 old := gomaxprocs
5704 if old < 0 || nprocs <= 0 {
5705 throw("procresize: invalid arg")
5706 }
5707 trace := traceAcquire()
5708 if trace.ok() {
5709 trace.Gomaxprocs(nprocs)
5710 traceRelease(trace)
5711 }
5712
5713
5714 now := nanotime()
5715 if sched.procresizetime != 0 {
5716 sched.totaltime += int64(old) * (now - sched.procresizetime)
5717 }
5718 sched.procresizetime = now
5719
5720 maskWords := (nprocs + 31) / 32
5721
5722
5723 if nprocs > int32(len(allp)) {
5724
5725
5726 lock(&allpLock)
5727 if nprocs <= int32(cap(allp)) {
5728 allp = allp[:nprocs]
5729 } else {
5730 nallp := make([]*p, nprocs)
5731
5732
5733 copy(nallp, allp[:cap(allp)])
5734 allp = nallp
5735 }
5736
5737 if maskWords <= int32(cap(idlepMask)) {
5738 idlepMask = idlepMask[:maskWords]
5739 timerpMask = timerpMask[:maskWords]
5740 } else {
5741 nidlepMask := make([]uint32, maskWords)
5742
5743 copy(nidlepMask, idlepMask)
5744 idlepMask = nidlepMask
5745
5746 ntimerpMask := make([]uint32, maskWords)
5747 copy(ntimerpMask, timerpMask)
5748 timerpMask = ntimerpMask
5749 }
5750 unlock(&allpLock)
5751 }
5752
5753
5754 for i := old; i < nprocs; i++ {
5755 pp := allp[i]
5756 if pp == nil {
5757 pp = new(p)
5758 }
5759 pp.init(i)
5760 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5761 }
5762
5763 gp := getg()
5764 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5765
5766 gp.m.p.ptr().status = _Prunning
5767 gp.m.p.ptr().mcache.prepareForSweep()
5768 } else {
5769
5770
5771
5772
5773
5774 if gp.m.p != 0 {
5775 trace := traceAcquire()
5776 if trace.ok() {
5777
5778
5779
5780 trace.GoSched()
5781 trace.ProcStop(gp.m.p.ptr())
5782 traceRelease(trace)
5783 }
5784 gp.m.p.ptr().m = 0
5785 }
5786 gp.m.p = 0
5787 pp := allp[0]
5788 pp.m = 0
5789 pp.status = _Pidle
5790 acquirep(pp)
5791 trace := traceAcquire()
5792 if trace.ok() {
5793 trace.GoStart()
5794 traceRelease(trace)
5795 }
5796 }
5797
5798
5799 mcache0 = nil
5800
5801
5802 for i := nprocs; i < old; i++ {
5803 pp := allp[i]
5804 pp.destroy()
5805
5806 }
5807
5808
5809 if int32(len(allp)) != nprocs {
5810 lock(&allpLock)
5811 allp = allp[:nprocs]
5812 idlepMask = idlepMask[:maskWords]
5813 timerpMask = timerpMask[:maskWords]
5814 unlock(&allpLock)
5815 }
5816
5817 var runnablePs *p
5818 for i := nprocs - 1; i >= 0; i-- {
5819 pp := allp[i]
5820 if gp.m.p.ptr() == pp {
5821 continue
5822 }
5823 pp.status = _Pidle
5824 if runqempty(pp) {
5825 pidleput(pp, now)
5826 } else {
5827 pp.m.set(mget())
5828 pp.link.set(runnablePs)
5829 runnablePs = pp
5830 }
5831 }
5832 stealOrder.reset(uint32(nprocs))
5833 var int32p *int32 = &gomaxprocs
5834 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5835 if old != nprocs {
5836
5837 gcCPULimiter.resetCapacity(now, nprocs)
5838 }
5839 return runnablePs
5840 }
5841
5842 // Associate p and the current m.
5843 //
5844 // This function is allowed to have write barriers even if the caller
5845 // isn't because it immediately acquires pp.
5846
5847
5848 func acquirep(pp *p) {
5849
5850 wirep(pp)
5851
5852
5853
5854
5855
5856 pp.mcache.prepareForSweep()
5857
5858 trace := traceAcquire()
5859 if trace.ok() {
5860 trace.ProcStart()
5861 traceRelease(trace)
5862 }
5863 }
5864
5865 // wirep is the first step of acquirep, which actually associates the
5866 // current M to pp. This is broken out so we can disallow write
5867 // barriers for this part, since we don't yet have a P.
5868
5869
5870
5871 func wirep(pp *p) {
5872 gp := getg()
5873
5874 if gp.m.p != 0 {
5875
5876
5877 systemstack(func() {
5878 throw("wirep: already in go")
5879 })
5880 }
5881 if pp.m != 0 || pp.status != _Pidle {
5882
5883
5884 systemstack(func() {
5885 id := int64(0)
5886 if pp.m != 0 {
5887 id = pp.m.ptr().id
5888 }
5889 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5890 throw("wirep: invalid p state")
5891 })
5892 }
5893 gp.m.p.set(pp)
5894 pp.m.set(gp.m)
5895 pp.status = _Prunning
5896 }
5897
5898
5899 func releasep() *p {
5900 trace := traceAcquire()
5901 if trace.ok() {
5902 trace.ProcStop(getg().m.p.ptr())
5903 traceRelease(trace)
5904 }
5905 return releasepNoTrace()
5906 }
5907
5908
5909 func releasepNoTrace() *p {
5910 gp := getg()
5911
5912 if gp.m.p == 0 {
5913 throw("releasep: invalid arg")
5914 }
5915 pp := gp.m.p.ptr()
5916 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5917 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5918 throw("releasep: invalid p state")
5919 }
5920 gp.m.p = 0
5921 pp.m = 0
5922 pp.status = _Pidle
5923 return pp
5924 }
5925
5926 func incidlelocked(v int32) {
5927 lock(&sched.lock)
5928 sched.nmidlelocked += v
5929 if v > 0 {
5930 checkdead()
5931 }
5932 unlock(&sched.lock)
5933 }
5934
5935 // Check for deadlock situation.
5936 // The check is based on the number of running M's: if 0 -> deadlock.
5937 // sched.lock must be held.
5938 func checkdead() {
5939 assertLockHeld(&sched.lock)
5940
5941
5942
5943
5944
5945
5946 if (islibrary || isarchive) && GOARCH != "wasm" {
5947 return
5948 }
5949
5950
5951
5952
5953
5954 if panicking.Load() > 0 {
5955 return
5956 }
5957
5958
5959
5960
5961
5962 var run0 int32
5963 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
5964 run0 = 1
5965 }
5966
5967 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5968 if run > run0 {
5969 return
5970 }
5971 if run < 0 {
5972 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5973 unlock(&sched.lock)
5974 throw("checkdead: inconsistent counts")
5975 }
5976
5977 grunning := 0
5978 forEachG(func(gp *g) {
5979 if isSystemGoroutine(gp, false) {
5980 return
5981 }
5982 s := readgstatus(gp)
5983 switch s &^ _Gscan {
5984 case _Gwaiting,
5985 _Gpreempted:
5986 grunning++
5987 case _Grunnable,
5988 _Grunning,
5989 _Gsyscall:
5990 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5991 unlock(&sched.lock)
5992 throw("checkdead: runnable g")
5993 }
5994 })
5995 if grunning == 0 {
5996 unlock(&sched.lock)
5997 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
5998 }
5999
6000
6001 if faketime != 0 {
6002 if when := timeSleepUntil(); when < maxWhen {
6003 faketime = when
6004
6005
6006 pp, _ := pidleget(faketime)
6007 if pp == nil {
6008
6009
6010 unlock(&sched.lock)
6011 throw("checkdead: no p for timer")
6012 }
6013 mp := mget()
6014 if mp == nil {
6015
6016
6017 unlock(&sched.lock)
6018 throw("checkdead: no m for timer")
6019 }
6020
6021
6022
6023 sched.nmspinning.Add(1)
6024 mp.spinning = true
6025 mp.nextp.set(pp)
6026 notewakeup(&mp.park)
6027 return
6028 }
6029 }
6030
6031
6032 for _, pp := range allp {
6033 if len(pp.timers.heap) > 0 {
6034 return
6035 }
6036 }
6037
6038 unlock(&sched.lock)
6039 fatal("all goroutines are asleep - deadlock!")
6040 }
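// Editor's note: checkdead is what produces the familiar fatal error for a
// fully blocked program. A minimal reproducer:
//
//	func main() {
//		ch := make(chan int)
//		<-ch // fatal error: all goroutines are asleep - deadlock!
//	}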
6041
6042
6043
6044
6045
6046
6047 var forcegcperiod int64 = 2 * 60 * 1e9
6048
6049
6050
6051 var needSysmonWorkaround bool = false
6052
6053
6054
6055
6056 const haveSysmon = GOARCH != "wasm"
6057
6058 // sysmon runs continuously on its own M, without a P, so write barriers are
6059 // not allowed. It polls the network if it has not been polled recently,
6060 // retakes Ps blocked in syscalls, preempts long-running Gs, and forces GCs.
6061 func sysmon() {
6062 lock(&sched.lock)
6063 sched.nmsys++
6064 checkdead()
6065 unlock(&sched.lock)
6066
6067 lasttrace := int64(0)
6068 idle := 0
6069 delay := uint32(0)
6070
6071 for {
6072 if idle == 0 {
6073 delay = 20
6074 } else if idle > 50 {
6075 delay *= 2
6076 }
6077 if delay > 10*1000 {
6078 delay = 10 * 1000
6079 }
6080 usleep(delay)
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
6095
6096
6097 now := nanotime()
6098 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6099 lock(&sched.lock)
6100 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6101 syscallWake := false
6102 next := timeSleepUntil()
6103 if next > now {
6104 sched.sysmonwait.Store(true)
6105 unlock(&sched.lock)
6106
6107
6108 sleep := forcegcperiod / 2
6109 if next-now < sleep {
6110 sleep = next - now
6111 }
6112 shouldRelax := sleep >= osRelaxMinNS
6113 if shouldRelax {
6114 osRelax(true)
6115 }
6116 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6117 if shouldRelax {
6118 osRelax(false)
6119 }
6120 lock(&sched.lock)
6121 sched.sysmonwait.Store(false)
6122 noteclear(&sched.sysmonnote)
6123 }
6124 if syscallWake {
6125 idle = 0
6126 delay = 20
6127 }
6128 }
6129 unlock(&sched.lock)
6130 }
6131
6132 lock(&sched.sysmonlock)
6133
6134
6135 now = nanotime()
6136
6137
6138 if *cgo_yield != nil {
6139 asmcgocall(*cgo_yield, nil)
6140 }
6141
6142 lastpoll := sched.lastpoll.Load()
6143 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6144 sched.lastpoll.CompareAndSwap(lastpoll, now)
6145 list, delta := netpoll(0)
6146 if !list.empty() {
6147
6148
6149
6150
6151
6152
6153
6154 incidlelocked(-1)
6155 injectglist(&list)
6156 incidlelocked(1)
6157 netpollAdjustWaiters(delta)
6158 }
6159 }
6160 if GOOS == "netbsd" && needSysmonWorkaround {
6161
6162
6163
6164
6165
6166
6167
6168
6169
6170
6171
6172
6173
6174
6175
6176 if next := timeSleepUntil(); next < now {
6177 startm(nil, false, false)
6178 }
6179 }
6180 if scavenger.sysmonWake.Load() != 0 {
6181
6182 scavenger.wake()
6183 }
6184
6185
6186 if retake(now) != 0 {
6187 idle = 0
6188 } else {
6189 idle++
6190 }
6191
6192 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6193 lock(&forcegc.lock)
6194 forcegc.idle.Store(false)
6195 var list gList
6196 list.push(forcegc.g)
6197 injectglist(&list)
6198 unlock(&forcegc.lock)
6199 }
6200 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6201 lasttrace = now
6202 schedtrace(debug.scheddetail > 0)
6203 }
6204 unlock(&sched.sysmonlock)
6205 }
6206 }
6207
6208 type sysmontick struct {
6209 schedtick uint32
6210 syscalltick uint32
6211 schedwhen int64
6212 syscallwhen int64
6213 }
6214
6215
6216
6217 const forcePreemptNS = 10 * 1000 * 1000
6218
6219 func retake(now int64) uint32 {
6220 n := 0
6221
6222
6223 lock(&allpLock)
6224
6225
6226
6227 for i := 0; i < len(allp); i++ {
6228 pp := allp[i]
6229 if pp == nil {
6230
6231
6232 continue
6233 }
6234 pd := &pp.sysmontick
6235 s := pp.status
6236 sysretake := false
6237 if s == _Prunning || s == _Psyscall {
6238
6239
6240
6241
6242 t := int64(pp.schedtick)
6243 if int64(pd.schedtick) != t {
6244 pd.schedtick = uint32(t)
6245 pd.schedwhen = now
6246 } else if pd.schedwhen+forcePreemptNS <= now {
6247 preemptone(pp)
6248
6249
6250 sysretake = true
6251 }
6252 }
6253 if s == _Psyscall {
6254
6255 t := int64(pp.syscalltick)
6256 if !sysretake && int64(pd.syscalltick) != t {
6257 pd.syscalltick = uint32(t)
6258 pd.syscallwhen = now
6259 continue
6260 }
6261
6262
6263
6264 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6265 continue
6266 }
6267
6268 unlock(&allpLock)
6269
6270
6271
6272
6273 incidlelocked(-1)
6274 trace := traceAcquire()
6275 if atomic.Cas(&pp.status, s, _Pidle) {
6276 if trace.ok() {
6277 trace.ProcSteal(pp, false)
6278 traceRelease(trace)
6279 }
6280 n++
6281 pp.syscalltick++
6282 handoffp(pp)
6283 } else if trace.ok() {
6284 traceRelease(trace)
6285 }
6286 incidlelocked(1)
6287 lock(&allpLock)
6288 }
6289 }
6290 unlock(&allpLock)
6291 return uint32(n)
6292 }
6293
6294 // Tell all goroutines that they have been preempted and they should stop.
6295 // This function is purely best-effort. It can fail to inform a goroutine if a
6296 // processor just started running it.
6297 // No locks need to be held.
6298 // Returns true if preemption request was issued to at least one goroutine.
6299 func preemptall() bool {
6300 res := false
6301 for _, pp := range allp {
6302 if pp.status != _Prunning {
6303 continue
6304 }
6305 if preemptone(pp) {
6306 res = true
6307 }
6308 }
6309 return res
6310 }
6311
6312 // Tell the goroutine running on processor P to stop.
6313 // This function is purely best-effort. It can incorrectly fail to inform the
6314 // goroutine. It can inform the wrong goroutine. Even if it informs the
6315 // correct goroutine, that goroutine might ignore the request if it is
6316 // simultaneously executing newstack.
6317 // No lock needs to be held.
6318 // Returns true if preemption request was issued.
6319 // The actual preemption will happen at some point in the future
6320 // and will be indicated by the gp->status no longer being
6321 // Grunning.
6322 func preemptone(pp *p) bool {
6323 mp := pp.m.ptr()
6324 if mp == nil || mp == getg().m {
6325 return false
6326 }
6327 gp := mp.curg
6328 if gp == nil || gp == mp.g0 {
6329 return false
6330 }
6331
6332 gp.preempt = true
6333
6334
6335
6336
6337
6338 gp.stackguard0 = stackPreempt
6339
6340
6341 if preemptMSupported && debug.asyncpreemptoff == 0 {
6342 pp.preempt = true
6343 preemptM(mp)
6344 }
6345
6346 return true
6347 }
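// Editor's note: two preemption mechanisms are combined above. Setting
// gp.stackguard0 = stackPreempt makes the goroutine's next stack-growth check
// divert into the scheduler (cooperative preemption), while preemptM delivers
// an asynchronous signal so that even a loop with no function calls, such as
//
//	for {
//		counter++ // hypothetical tight loop with no preemption points
//	}
//
// can be interrupted. The signal path can be disabled with
// GODEBUG=asyncpreemptoff=1, which is why it is gated on debug.asyncpreemptoff.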
6348
6349 var starttime int64
6350
6351 func schedtrace(detailed bool) {
6352 now := nanotime()
6353 if starttime == 0 {
6354 starttime = now
6355 }
6356
6357 lock(&sched.lock)
6358 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6359 if detailed {
6360 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6361 }
6362
6363
6364
6365 for i, pp := range allp {
6366 mp := pp.m.ptr()
6367 h := atomic.Load(&pp.runqhead)
6368 t := atomic.Load(&pp.runqtail)
6369 if detailed {
6370 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6371 if mp != nil {
6372 print(mp.id)
6373 } else {
6374 print("nil")
6375 }
6376 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6377 } else {
6378
6379
6380 print(" ")
6381 if i == 0 {
6382 print("[")
6383 }
6384 print(t - h)
6385 if i == len(allp)-1 {
6386 print("]\n")
6387 }
6388 }
6389 }
6390
6391 if !detailed {
6392 unlock(&sched.lock)
6393 return
6394 }
6395
6396 for mp := allm; mp != nil; mp = mp.alllink {
6397 pp := mp.p.ptr()
6398 print(" M", mp.id, ": p=")
6399 if pp != nil {
6400 print(pp.id)
6401 } else {
6402 print("nil")
6403 }
6404 print(" curg=")
6405 if mp.curg != nil {
6406 print(mp.curg.goid)
6407 } else {
6408 print("nil")
6409 }
6410 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6411 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6412 print(lockedg.goid)
6413 } else {
6414 print("nil")
6415 }
6416 print("\n")
6417 }
6418
6419 forEachG(func(gp *g) {
6420 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6421 if gp.m != nil {
6422 print(gp.m.id)
6423 } else {
6424 print("nil")
6425 }
6426 print(" lockedm=")
6427 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6428 print(lockedm.id)
6429 } else {
6430 print("nil")
6431 }
6432 print("\n")
6433 })
6434 unlock(&sched.lock)
6435 }
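// Editor's note: schedtrace output is enabled from the environment rather than
// called directly, e.g.
//
//	GODEBUG=schedtrace=1000 ./myprog                 // one summary line per second
//	GODEBUG=schedtrace=1000,scheddetail=1 ./myprog   // per-P/M/G detail
//
// The interval is in milliseconds; the program name is a placeholder.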
6436
6437 // schedEnableUser enables or disables the scheduling of user
6438 // goroutines.
6439 //
6440 // This does not stop already running user goroutines, so the caller
6441 // should first stop the world when disabling user goroutines.
6442 func schedEnableUser(enable bool) {
6443 lock(&sched.lock)
6444 if sched.disable.user == !enable {
6445 unlock(&sched.lock)
6446 return
6447 }
6448 sched.disable.user = !enable
6449 if enable {
6450 n := sched.disable.n
6451 sched.disable.n = 0
6452 globrunqputbatch(&sched.disable.runnable, n)
6453 unlock(&sched.lock)
6454 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6455 startm(nil, false, false)
6456 }
6457 } else {
6458 unlock(&sched.lock)
6459 }
6460 }
6461
6462
6463
6464
6465
6466 func schedEnabled(gp *g) bool {
6467 assertLockHeld(&sched.lock)
6468
6469 if sched.disable.user {
6470 return isSystemGoroutine(gp, true)
6471 }
6472 return true
6473 }
6474
6475
6476
6477
6478
6479
6480 func mput(mp *m) {
6481 assertLockHeld(&sched.lock)
6482
6483 mp.schedlink = sched.midle
6484 sched.midle.set(mp)
6485 sched.nmidle++
6486 checkdead()
6487 }
6488
6489
6490
6491
6492
6493
6494 func mget() *m {
6495 assertLockHeld(&sched.lock)
6496
6497 mp := sched.midle.ptr()
6498 if mp != nil {
6499 sched.midle = mp.schedlink
6500 sched.nmidle--
6501 }
6502 return mp
6503 }
6504
6505
6506
6507
6508
6509
6510 func globrunqput(gp *g) {
6511 assertLockHeld(&sched.lock)
6512
6513 sched.runq.pushBack(gp)
6514 sched.runqsize++
6515 }
6516
6517
6518
6519
6520
6521
6522 func globrunqputhead(gp *g) {
6523 assertLockHeld(&sched.lock)
6524
6525 sched.runq.push(gp)
6526 sched.runqsize++
6527 }
6528
6529
6530
6531
6532
6533
6534
6535 func globrunqputbatch(batch *gQueue, n int32) {
6536 assertLockHeld(&sched.lock)
6537
6538 sched.runq.pushBackAll(*batch)
6539 sched.runqsize += n
6540 *batch = gQueue{}
6541 }
6542
6543 // Try to get a batch of G's from the global runnable queue.
6544 // sched.lock must be held.
6545 func globrunqget(pp *p, max int32) *g {
6546 assertLockHeld(&sched.lock)
6547
6548 if sched.runqsize == 0 {
6549 return nil
6550 }
6551
6552 n := sched.runqsize/gomaxprocs + 1
6553 if n > sched.runqsize {
6554 n = sched.runqsize
6555 }
6556 if max > 0 && n > max {
6557 n = max
6558 }
6559 if n > int32(len(pp.runq))/2 {
6560 n = int32(len(pp.runq)) / 2
6561 }
6562
6563 sched.runqsize -= n
6564
6565 gp := sched.runq.pop()
6566 n--
6567 for ; n > 0; n-- {
6568 gp1 := sched.runq.pop()
6569 runqput(pp, gp1, false)
6570 }
6571 return gp
6572 }
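// Editor's note, a worked example of the batch-size arithmetic above: with
// sched.runqsize = 100 and gomaxprocs = 8, n = 100/8 + 1 = 13, so one G is
// returned to the caller and the remaining 12 are pushed onto pp's local run
// queue. n is additionally capped by max (when positive) and by half of the
// local queue, len(pp.runq)/2, so the transferred batch always fits locally.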
6573
6574
6575 type pMask []uint32
6576
6577
6578 func (p pMask) read(id uint32) bool {
6579 word := id / 32
6580 mask := uint32(1) << (id % 32)
6581 return (atomic.Load(&p[word]) & mask) != 0
6582 }
6583
6584
6585 func (p pMask) set(id int32) {
6586 word := id / 32
6587 mask := uint32(1) << (id % 32)
6588 atomic.Or(&p[word], mask)
6589 }
6590
6591
6592 func (p pMask) clear(id int32) {
6593 word := id / 32
6594 mask := uint32(1) << (id % 32)
6595 atomic.And(&p[word], ^mask)
6596 }
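// Editor's note, a small worked example of the word/bit decomposition used by
// read, set, and clear above: for a hypothetical P with id 70,
//
//	word := id / 32                 // 70 / 32 = 2
//	mask := uint32(1) << (id % 32)  // 1 << 6
//
// so P 70 is tracked by bit 6 of p[2], and the three methods differ only in
// whether they atomically Load, Or, or And-with-complement that word.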
6597
6598
6599 // pidleput puts p on the _Pidle list. now must be a relatively recent call
6600 // to nanotime or zero. Returns now or the current time if now was zero.
6601 //
6602 // This releases ownership of p. Once sched.lock is released it is no longer
6603 // safe to use p.
6604 //
6605 // sched.lock must be held.
6606 //
6607 // May run during STW, so write barriers are not allowed.
6608
6609 func pidleput(pp *p, now int64) int64 {
6610 assertLockHeld(&sched.lock)
6611
6612 if !runqempty(pp) {
6613 throw("pidleput: P has non-empty run queue")
6614 }
6615 if now == 0 {
6616 now = nanotime()
6617 }
6618 if pp.timers.len.Load() == 0 {
6619 timerpMask.clear(pp.id)
6620 }
6621 idlepMask.set(pp.id)
6622 pp.link = sched.pidle
6623 sched.pidle.set(pp)
6624 sched.npidle.Add(1)
6625 if !pp.limiterEvent.start(limiterEventIdle, now) {
6626 throw("must be able to track idle limiter event")
6627 }
6628 return now
6629 }
6630
6631
6632 // pidleget tries to get a p from the _Pidle list, acquiring ownership of p
6633 // along the way.
6634 //
6635 // sched.lock must be held.
6636 //
6637 // May run during STW, so write barriers are not allowed.
6638 func pidleget(now int64) (*p, int64) {
6639 assertLockHeld(&sched.lock)
6640
6641 pp := sched.pidle.ptr()
6642 if pp != nil {
6643
6644 if now == 0 {
6645 now = nanotime()
6646 }
6647 timerpMask.set(pp.id)
6648 idlepMask.clear(pp.id)
6649 sched.pidle = pp.link
6650 sched.npidle.Add(-1)
6651 pp.limiterEvent.stop(limiterEventIdle, now)
6652 }
6653 return pp, now
6654 }
6655
6656
6657
6658
6659
6660
6661
6662
6663
6664
6665
6666 func pidlegetSpinning(now int64) (*p, int64) {
6667 assertLockHeld(&sched.lock)
6668
6669 pp, now := pidleget(now)
6670 if pp == nil {
6671
6672
6673
6674 sched.needspinning.Store(1)
6675 return nil, now
6676 }
6677
6678 return pp, now
6679 }
6680
6681 // runqempty reports whether pp has no Gs on its local run queue.
6682 // It never returns true spuriously.
6683 func runqempty(pp *p) bool {
6684 // Defend against a race where 1) pp has G1 in runnext but runqhead == runqtail,
6685 // 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runnext.
6686 // Simply observing that runqhead == runqtail and then observing that runnext == nil
6687 // does not mean the queue is empty.
6688 for {
6689 head := atomic.Load(&pp.runqhead)
6690 tail := atomic.Load(&pp.runqtail)
6691 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6692 if tail == atomic.Load(&pp.runqtail) {
6693 return head == tail && runnext == 0
6694 }
6695 }
6696 }
6697
6698
6699
6700
6701
6702
6703
6704
6705
6706
6707 const randomizeScheduler = raceenabled
6708
6709 // runqput tries to put g on the local runnable queue.
6710 // If next is false, runqput adds g to the tail of the runnable queue.
6711 // If next is true, runqput puts g in the pp.runnext slot.
6712 // If the run queue is full, g is put on the global queue.
6713 // Executed only by the owner P.
6714 func runqput(pp *p, gp *g, next bool) {
6715 if !haveSysmon && next {
6716
6717
6718
6719
6720
6721
6722
6723
6724 next = false
6725 }
6726 if randomizeScheduler && next && randn(2) == 0 {
6727 next = false
6728 }
6729
6730 if next {
6731 retryNext:
6732 oldnext := pp.runnext
6733 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6734 goto retryNext
6735 }
6736 if oldnext == 0 {
6737 return
6738 }
6739
6740 gp = oldnext.ptr()
6741 }
6742
6743 retry:
6744 h := atomic.LoadAcq(&pp.runqhead)
6745 t := pp.runqtail
6746 if t-h < uint32(len(pp.runq)) {
6747 pp.runq[t%uint32(len(pp.runq))].set(gp)
6748 atomic.StoreRel(&pp.runqtail, t+1)
6749 return
6750 }
6751 if runqputslow(pp, gp, h, t) {
6752 return
6753 }
6754
6755 goto retry
6756 }
6757
6758
6759
6760 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6761 var batch [len(pp.runq)/2 + 1]*g
6762
6763
6764 n := t - h
6765 n = n / 2
6766 if n != uint32(len(pp.runq)/2) {
6767 throw("runqputslow: queue is not full")
6768 }
6769 for i := uint32(0); i < n; i++ {
6770 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6771 }
6772 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6773 return false
6774 }
6775 batch[n] = gp
6776
6777 if randomizeScheduler {
6778 for i := uint32(1); i <= n; i++ {
6779 j := cheaprandn(i + 1)
6780 batch[i], batch[j] = batch[j], batch[i]
6781 }
6782 }
6783
6784 // Link the goroutines.
6785 for i := uint32(0); i < n; i++ {
6786 batch[i].schedlink.set(batch[i+1])
6787 }
6788 var q gQueue
6789 q.head.set(batch[0])
6790 q.tail.set(batch[n])
6791
6792 // Now put the batch on the global queue.
6793 lock(&sched.lock)
6794 globrunqputbatch(&q, int32(n+1))
6795 unlock(&sched.lock)
6796 return true
6797 }
6798
6799 // runqputbatch tries to put all the Gs on q on the local runnable queue.
6800 // If the local queue is full, the remainder is put on the global queue; in
6801 // that case this will temporarily acquire the scheduler lock.
6802 // Executed only by the owner P.
6803 func runqputbatch(pp *p, q *gQueue, qsize int) {
6804 h := atomic.LoadAcq(&pp.runqhead)
6805 t := pp.runqtail
6806 n := uint32(0)
6807 for !q.empty() && t-h < uint32(len(pp.runq)) {
6808 gp := q.pop()
6809 pp.runq[t%uint32(len(pp.runq))].set(gp)
6810 t++
6811 n++
6812 }
6813 qsize -= int(n)
6814
6815 if randomizeScheduler {
6816 off := func(o uint32) uint32 {
6817 return (pp.runqtail + o) % uint32(len(pp.runq))
6818 }
6819 for i := uint32(1); i < n; i++ {
6820 j := cheaprandn(i + 1)
6821 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6822 }
6823 }
6824
6825 atomic.StoreRel(&pp.runqtail, t)
6826 if !q.empty() {
6827 lock(&sched.lock)
6828 globrunqputbatch(q, int32(qsize))
6829 unlock(&sched.lock)
6830 }
6831 }
6832
6833 // runqget gets a g from the local runnable queue.
6834 // If inheritTime is true, gp should inherit the remaining time in the
6835 // current time slice. Otherwise, it should start a new time slice.
6836 // Executed only by the owner P.
6837 func runqget(pp *p) (gp *g, inheritTime bool) {
6838 // If there's a runnext, it's the next G to run.
6839 next := pp.runnext
6840 // If the runnext cas fails, it could only have been stolen by another P,
6841 // because other Ps can race to set runnext to 0, but only the current P can
6842 // set it to non-0. Hence, there's no need to retry this cas if it fails.
6843 if next != 0 && pp.runnext.cas(next, 0) {
6844 return next.ptr(), true
6845 }
6846
6847 for {
6848 h := atomic.LoadAcq(&pp.runqhead)
6849 t := pp.runqtail
6850 if t == h {
6851 return nil, false
6852 }
6853 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6854 if atomic.CasRel(&pp.runqhead, h, h+1) {
6855 return gp, false
6856 }
6857 }
6858 }
6859
6860 // runqdrain drains the local runnable queue of pp and returns all goroutines
6861 // in it. Executed only by the owner P.
6862 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6863 oldNext := pp.runnext
6864 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6865 drainQ.pushBack(oldNext.ptr())
6866 n++
6867 }
6868
6869 retry:
6870 h := atomic.LoadAcq(&pp.runqhead)
6871 t := pp.runqtail
6872 qn := t - h
6873 if qn == 0 {
6874 return
6875 }
6876 if qn > uint32(len(pp.runq)) { // read inconsistent h and t
6877 goto retry
6878 }
6879
6880 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6881 goto retry
6882 }
6883
6884 // Note the inverted order: the head pointer is advanced (the cas above)
6885 // before the Gs are read out and linked into drainQ. This way gp.schedlink
6886 // is only written after this P has taken full ownership of the grabbed Gs;
6887 // otherwise a concurrent runqsteal could be copying the same slots while
6888 // runqdrain is modifying their schedlink fields.
6889
6890
6891 for i := uint32(0); i < qn; i++ {
6892 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6893 drainQ.pushBack(gp)
6894 n++
6895 }
6896 return
6897 }
6898
6899 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
6900 // batch is a ring buffer starting at batchHead.
6901 // Returns the number of grabbed goroutines.
6902 // Can be executed by any P.
6903 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6904 for {
6905 h := atomic.LoadAcq(&pp.runqhead)
6906 t := atomic.LoadAcq(&pp.runqtail)
6907 n := t - h
6908 n = n - n/2
6909 if n == 0 {
6910 if stealRunNextG {
6911 // Try to steal from pp.runnext.
6912 if next := pp.runnext; next != 0 {
6913 if pp.status == _Prunning {
6914 // Sleep to ensure that pp isn't about to run the g
6915 // we are about to steal.
6916 // The important use case here is when the g running
6917 // on pp ready()s another g and then almost
6918 // immediately blocks. Instead of stealing runnext
6919 // in this window, back off to give pp a chance to
6920 // schedule runnext. This will avoid thrashing gs
6921 // between different Ps.
6922 // A sync chan send/recv takes ~50ns as of time of
6923 // writing, so 3us gives ~50x overshoot.
6924 if !osHasLowResTimer {
6925 usleep(3)
6926 } else {
6927 // On some platforms system timer granularity is
6928 // 1-15ms, which is way too much for this
6929 // optimization. So just yield.
6930 osyield()
6931 }
6932 }
6933 if !pp.runnext.cas(next, 0) {
6934 continue
6935 }
6936 batch[batchHead%uint32(len(batch))] = next
6937 return 1
6938 }
6939 }
6940 return 0
6941 }
6942 if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
6943 continue
6944 }
6945 for i := uint32(0); i < n; i++ {
6946 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6947 batch[(batchHead+i)%uint32(len(batch))] = g
6948 }
6949 if atomic.CasRel(&pp.runqhead, h, h+n) {
6950 return n
6951 }
6952 }
6953 }
6954
6955 // runqsteal steals half of the elements from the local runnable queue of p2
6956 // and puts them onto the local runnable queue of pp.
6957 // Returns one of the stolen elements (or nil if it failed).
6958 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
6959 t := pp.runqtail
6960 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
6961 if n == 0 {
6962 return nil
6963 }
6964 n--
6965 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6966 if n == 0 {
6967 return gp
6968 }
6969 h := atomic.LoadAcq(&pp.runqhead)
6970 if t-h+n >= uint32(len(pp.runq)) {
6971 throw("runqsteal: runq overflow")
6972 }
6973 atomic.StoreRel(&pp.runqtail, t+n)
6974 return gp
6975 }
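// Illustrative note (not part of the original source): a successful steal
// moves roughly half of p2's local queue. runqgrab copies n = (t-h) - (t-h)/2
// entries into pp.runq starting at pp.runqtail; runqsteal then returns the
// last grabbed G directly and publishes the remaining n-1 by advancing
// pp.runqtail, so the thief gets one G to run immediately plus a refill of
// its own queue.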
6976
6977 // A gQueue is a dequeue of Gs linked through g.schedlink.
6978 // A G can only be on one gQueue or gList at a time.
6979 type gQueue struct {
6980 head guintptr
6981 tail guintptr
6982 }
6983
6984 // empty reports whether q is empty.
6985 func (q *gQueue) empty() bool {
6986 return q.head == 0
6987 }
6988
6989 // push adds gp to the head of q.
6990 func (q *gQueue) push(gp *g) {
6991 gp.schedlink = q.head
6992 q.head.set(gp)
6993 if q.tail == 0 {
6994 q.tail.set(gp)
6995 }
6996 }
6997
6998 // pushBack adds gp to the tail of q.
6999 func (q *gQueue) pushBack(gp *g) {
7000 gp.schedlink = 0
7001 if q.tail != 0 {
7002 q.tail.ptr().schedlink.set(gp)
7003 } else {
7004 q.head.set(gp)
7005 }
7006 q.tail.set(gp)
7007 }
7008
7009 // pushBackAll adds all Gs in q2 to the tail of q.
7010 // After this, q2 must not be used.
7011 func (q *gQueue) pushBackAll(q2 gQueue) {
7012 if q2.tail == 0 {
7013 return
7014 }
7015 q2.tail.ptr().schedlink = 0
7016 if q.tail != 0 {
7017 q.tail.ptr().schedlink = q2.head
7018 } else {
7019 q.head = q2.head
7020 }
7021 q.tail = q2.tail
7022 }
7023
7024 // pop removes and returns the head of queue q.
7025 // It returns nil if q is empty.
7026 func (q *gQueue) pop() *g {
7027 gp := q.head.ptr()
7028 if gp != nil {
7029 q.head = gp.schedlink
7030 if q.head == 0 {
7031 q.tail = 0
7032 }
7033 }
7034 return gp
7035 }
7036
7037 // popList takes all Gs in q and returns them as a gList.
7038 func (q *gQueue) popList() gList {
7039 stack := gList{q.head}
7040 *q = gQueue{}
7041 return stack
7042 }
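// Illustrative sketch (not part of the original source): typical gQueue use
// inside the scheduler looks like
//
//	var q gQueue
//	q.pushBack(gp1)
//	q.pushBack(gp2)
//	for gp := q.pop(); gp != nil; gp = q.pop() {
//		// ... make gp runnable ...
//	}
//
// Because the links live in g.schedlink, a G can sit on at most one gQueue or
// gList at a time.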
7043
7044 // A gList is a list of Gs linked through g.schedlink.
7045 // A G can only be on one gQueue or gList at a time.
7046 type gList struct {
7047 head guintptr
7048 }
7049
7050 // empty reports whether l is empty.
7051 func (l *gList) empty() bool {
7052 return l.head == 0
7053 }
7054
7055 // push adds gp to the head of l.
7056 func (l *gList) push(gp *g) {
7057 gp.schedlink = l.head
7058 l.head.set(gp)
7059 }
7060
7061 // pushAll prepends all Gs in q to l.
7062 func (l *gList) pushAll(q gQueue) {
7063 if !q.empty() {
7064 q.tail.ptr().schedlink = l.head
7065 l.head = q.head
7066 }
7067 }
7068
7069 // pop removes and returns the head of l. If l is empty, it returns nil.
7070 func (l *gList) pop() *g {
7071 gp := l.head.ptr()
7072 if gp != nil {
7073 l.head = gp.schedlink
7074 }
7075 return gp
7076 }
7077
7078 //go:linkname setMaxThreads runtime/debug.setMaxThreads
7079 func setMaxThreads(in int) (out int) {
7080 lock(&sched.lock)
7081 out = int(sched.maxmcount)
7082 if in > 0x7fffffff {
7083 sched.maxmcount = 0x7fffffff
7084 } else {
7085 sched.maxmcount = int32(in)
7086 }
7087 checkmcount()
7088 unlock(&sched.lock)
7089 return
7090 }
7091
7092
7093
7094 // procPin pins the current goroutine's M to its P by incrementing m.locks,
7095 // which disables preemption, and returns the id of that P. The pin is
7096 // released by procUnpin.
7097 //
7098 // procPin should be an internal detail, but widely used packages access it
7099 // using linkname. Do not remove or change the type signature.
7100 // See go.dev/issue/67401.
7101 //
7102 //go:linkname procPin
7103 //go:nosplit
7104 func procPin() int {
7105 gp := getg()
7106 mp := gp.m
7107
7108 mp.locks++
7109 return int(mp.p.ptr().id)
7110 }
7111
7112
7113
7114
7115 // procUnpin releases the pin taken by procPin, re-enabling preemption of the
7116 // current goroutine's M.
7117 //
7118 // procUnpin should be an internal detail, but widely used packages access it
7119 // using linkname. Do not remove or change the type signature.
7120 // See go.dev/issue/67401.
7121 //
7122 //go:linkname procUnpin
7123 //go:nosplit
7124 func procUnpin() {
7125 gp := getg()
7126 gp.m.locks--
7127 }
7128
7129 //go:linkname sync_runtime_procPin sync.runtime_procPin
7130 //go:nosplit
7131 func sync_runtime_procPin() int {
7132 return procPin()
7133 }
7134
7135 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
7136 //go:nosplit
7137 func sync_runtime_procUnpin() {
7138 procUnpin()
7139 }
7140
7141 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
7142 //go:nosplit
7143 func sync_atomic_runtime_procPin() int {
7144 return procPin()
7145 }
7146
7147 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
7148 //go:nosplit
7149 func sync_atomic_runtime_procUnpin() {
7150 procUnpin()
7151 }
7152
7153
7154
7155
7156 // Active spinning for sync.Mutex.
7157 //
7158 // sync_runtime_canSpin reports whether spinning at iteration i is still
7159 // worthwhile for a sync.Mutex.
7160 //
7161 // sync_runtime_canSpin should be an internal detail, but widely used packages
7162 // access it using linkname. Do not remove or change the type signature.
7163 // See go.dev/issue/67401.
7164 //
7165 //go:linkname sync_runtime_canSpin
7166 //go:nosplit
7167 func sync_runtime_canSpin(i int) bool {
7168 // sync.Mutex is cooperative, so we are conservative with spinning.
7169 // Spin only a few times and only if running on a multicore machine,
7170 // GOMAXPROCS > 1, there is at least one other running P, and the local runq
7171 // is empty. As opposed to runtime mutexes we don't do passive spinning here,
7172 // because there can be work on the global runq or on other Ps.
7173 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7174 return false
7175 }
7176 if p := getg().m.p.ptr(); !runqempty(p) {
7177 return false
7178 }
7179 return true
7180 }
7181
7182
7183
7184
7185 // sync_runtime_doSpin performs a short burst of busy spinning
7186 // (procyield(active_spin_cnt)) on behalf of sync.Mutex.
7187 //
7188 // sync_runtime_doSpin should be an internal detail, but widely used packages
7189 // access it using linkname. Do not remove or change the type signature.
7190 // See go.dev/issue/67401.
7191 //
7192 //go:linkname sync_runtime_doSpin
7193 //go:nosplit
7194 func sync_runtime_doSpin() {
7195 procyield(active_spin_cnt)
7196 }
7197
7198 var stealOrder randomOrder
7199
7200 // randomOrder/randomEnum are helper types for randomized work stealing.
7201 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
7202 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
7203 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
7204 type randomOrder struct {
7205 count uint32
7206 coprimes []uint32
7207 }
7208
7209 type randomEnum struct {
7210 i uint32
7211 count uint32
7212 pos uint32
7213 inc uint32
7214 }
7215
7216 func (ord *randomOrder) reset(count uint32) {
7217 ord.count = count
7218 ord.coprimes = ord.coprimes[:0]
7219 for i := uint32(1); i <= count; i++ {
7220 if gcd(i, count) == 1 {
7221 ord.coprimes = append(ord.coprimes, i)
7222 }
7223 }
7224 }
7225
7226 func (ord *randomOrder) start(i uint32) randomEnum {
7227 return randomEnum{
7228 count: ord.count,
7229 pos: i % ord.count,
7230 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7231 }
7232 }
7233
7234 func (enum *randomEnum) done() bool {
7235 return enum.i == enum.count
7236 }
7237
7238 func (enum *randomEnum) next() {
7239 enum.i++
7240 enum.pos = (enum.pos + enum.inc) % enum.count
7241 }
7242
7243 func (enum *randomEnum) position() uint32 {
7244 return enum.pos
7245 }
7246
7247 func gcd(a, b uint32) uint32 {
7248 for b != 0 {
7249 a, b = b, a%b
7250 }
7251 return a
7252 }
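// Illustrative worked example (not part of the original source): with
// count = 4 the coprimes are {1, 3}. An enumeration started with pos = 1 and
// inc = 3 visits positions 1, 0, 3, 2: each step adds a value coprime to
// count modulo count, so every P is visited exactly once before done()
// reports true. stealOrder is reset from procresize (elsewhere in this file)
// and drives the work-stealing loop.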
7253
7254 // An initTask represents the set of initializations that need to be done
7255 // for a package.
7256 type initTask struct {
7257 state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
7258 nfns uint32
7259 // followed by nfns pcs, uintptr sized, one per init function to run
7260 }
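// Illustrative layout note (not part of the original source): the linker
// emits each initTask as the two uint32 fields above followed immediately by
// nfns function addresses, e.g. for a package with two init functions
//
//	offset 0:         state (uint32)
//	offset 4:         nfns  (uint32)
//	offset 8:         PC of init function 0 (uintptr)
//	offset 8+PtrSize: PC of init function 1 (uintptr)
//
// which is why doInit1 below reads function pointers starting at
// add(unsafe.Pointer(t), 8).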
7261
7262 // inittrace stores statistics for init functions. It is updated by malloc
7263 // and newproc when active is true.
7264 var inittrace tracestat
7265
7266 type tracestat struct {
7267 active bool
7268 id uint64
7269 allocs uint64
7270 bytes uint64
7271 }
7272
7273 func doInit(ts []*initTask) {
7274 for _, t := range ts {
7275 doInit1(t)
7276 }
7277 }
7278
7279 func doInit1(t *initTask) {
7280 switch t.state {
7281 case 2: // fully initialized
7282 return
7283 case 1: // initialization in progress
7284 throw("recursive call during initialization - linker skew")
7285 default: // not initialized yet
7286 t.state = 1 // initialization in progress
7287
7288 var (
7289 start int64
7290 before tracestat
7291 )
7292
7293 if inittrace.active {
7294 start = nanotime()
7295 // Load stats non-atomically since inittrace is updated only by this init goroutine.
7296 before = inittrace
7297 }
7298
7299 if t.nfns == 0 {
7300 // We should have pruned all of these in the linker.
7301 throw("inittask with no functions")
7302 }
7303
7304 firstFunc := add(unsafe.Pointer(t), 8)
7305 for i := uint32(0); i < t.nfns; i++ {
7306 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7307 f := *(*func())(unsafe.Pointer(&p))
7308 f()
7309 }
7310
7311 if inittrace.active {
7312 end := nanotime()
7313 // Load stats non-atomically since inittrace is updated only by this init goroutine.
7314 after := inittrace
7315
7316 f := *(*func())(unsafe.Pointer(&firstFunc))
7317 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7318
7319 var sbuf [24]byte
7320 print("init ", pkg, " @")
7321 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7322 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7323 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7324 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7325 print("\n")
7326 }
7327
7328 t.state = 2
7329 }
7330 }
7331