Source file: src/runtime/proc.go

5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/exithook"
15 "internal/runtime/sys"
16 "internal/strconv"
17 "internal/stringslite"
18 "unsafe"
19 )
20
21
22 var modinfo string
23
118 var (
119 m0 m
120 g0 g
121 mcache0 *mcache
122 raceprocctx0 uintptr
123 raceFiniLock mutex
124 )
125
126
127
128 var runtime_inittasks []*initTask
129
130
131
132 var mainInitDone atomic.Bool
133
134
135
136
137 var mainInitDoneChan chan bool
138
139
140 func main_main()
141
142
143 var mainStarted bool
144
145
146 var runtimeInitTime int64
147
148
149 var initSigmask sigset
150
151
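// The main goroutine. It finishes runtime initialization on m0, runs all
// package init functions, calls the user's main.main, and exits the process.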
152 func main() {
153 mp := getg().m
154
155
156
157 mp.g0.racectx = 0
158
159
160
161
162 if goarch.PtrSize == 8 {
163 maxstacksize = 1000000000
164 } else {
165 maxstacksize = 250000000
166 }
167
168
169
170
171 maxstackceiling = 2 * maxstacksize
172
173
174 mainStarted = true
175
176 if haveSysmon {
177 systemstack(func() {
178 newm(sysmon, nil, -1)
179 })
180 }
181
182
183
184
185
186
187
188 lockOSThread()
189
190 if mp != &m0 {
191 throw("runtime.main not on m0")
192 }
193
194
195
196 runtimeInitTime = nanotime()
197 if runtimeInitTime == 0 {
198 throw("nanotime returning zero")
199 }
200
201 if debug.inittrace != 0 {
202 inittrace.id = getg().goid
203 inittrace.active = true
204 }
205
206 doInit(runtime_inittasks)
207
208
209 needUnlock := true
210 defer func() {
211 if needUnlock {
212 unlockOSThread()
213 }
214 }()
215
216 gcenable()
217 defaultGOMAXPROCSUpdateEnable()
218
219 mainInitDoneChan = make(chan bool)
220 if iscgo {
221 if _cgo_pthread_key_created == nil {
222 throw("_cgo_pthread_key_created missing")
223 }
224
225 if _cgo_thread_start == nil {
226 throw("_cgo_thread_start missing")
227 }
228 if GOOS != "windows" {
229 if _cgo_setenv == nil {
230 throw("_cgo_setenv missing")
231 }
232 if _cgo_unsetenv == nil {
233 throw("_cgo_unsetenv missing")
234 }
235 }
236 if _cgo_notify_runtime_init_done == nil {
237 throw("_cgo_notify_runtime_init_done missing")
238 }
239
240
241 if set_crosscall2 == nil {
242 throw("set_crosscall2 missing")
243 }
244 set_crosscall2()
245
246
247
248 startTemplateThread()
249 cgocall(_cgo_notify_runtime_init_done, nil)
250 }
251
252
253
254
255
256
257
258
259 last := lastmoduledatap
260 for m := &firstmoduledata; true; m = m.next {
261 doInit(m.inittasks)
262 if m == last {
263 break
264 }
265 }
266
267
268
269 inittrace.active = false
270
271 mainInitDone.Store(true)
272 close(mainInitDoneChan)
273
274 needUnlock = false
275 unlockOSThread()
276
277 if isarchive || islibrary {
278
279
280 if GOARCH == "wasm" {
281
282
283
284
285
286
287
288 pause(sys.GetCallerSP() - 16)
289 panic("unreachable")
290 }
291 return
292 }
293 fn := main_main
294 fn()
295
296
297
298
299
300
301
302
303 exitHooksRun := false
304 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
305 runExitHooks(0)
306 exitHooksRun = true
307 lsandoleakcheck()
308 }
309
310
311
312
313
314 if runningPanicDefers.Load() != 0 {
315
316 for c := 0; c < 1000; c++ {
317 if runningPanicDefers.Load() == 0 {
318 break
319 }
320 Gosched()
321 }
322 }
323 if panicking.Load() != 0 {
324 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
325 }
326 if !exitHooksRun {
327 runExitHooks(0)
328 }
329 if raceenabled {
330 racefini()
331 }
332
333 exit(0)
334 for {
335 var x *int32
336 *x = 0
337 }
338 }
339
340
341
342
343 func os_beforeExit(exitCode int) {
344 runExitHooks(exitCode)
345 if exitCode == 0 && raceenabled {
346 racefini()
347 }
348
349
350 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
351 lsandoleakcheck()
352 }
353 }
354
355 func init() {
356 exithook.Gosched = Gosched
357 exithook.Goid = func() uint64 { return getg().goid }
358 exithook.Throw = throw
359 }
360
361 func runExitHooks(code int) {
362 exithook.Run(code)
363 }
364
365
366 func init() {
367 go forcegchelper()
368 }
369
370 func forcegchelper() {
371 forcegc.g = getg()
372 lockInit(&forcegc.lock, lockRankForcegc)
373 for {
374 lock(&forcegc.lock)
375 if forcegc.idle.Load() {
376 throw("forcegc: phase error")
377 }
378 forcegc.idle.Store(true)
379 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
380
381 if debug.gctrace > 0 {
382 println("GC forced")
383 }
384
385 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
386 }
387 }
388
389
390
391
392
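// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.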
393 func Gosched() {
394 checkTimeouts()
395 mcall(gosched_m)
396 }
397
398
399
400
401
402 func goschedguarded() {
403 mcall(goschedguarded_m)
404 }
405
406
407
408
409
410
411 func goschedIfBusy() {
412 gp := getg()
413
414
415 if !gp.preempt && sched.npidle.Load() > 0 {
416 return
417 }
418 mcall(gosched_m)
419 }
420
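// gopark puts the current goroutine into a waiting state with the given wait
// reason and calls unlockf(gp, lock) on the system stack. If unlockf returns
// false, the goroutine is made runnable again immediately; otherwise it stays
// parked until a later goready. unlockf must not access this goroutine's stack.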
449 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
450 if reason != waitReasonSleep {
451 checkTimeouts()
452 }
453 mp := acquirem()
454 gp := mp.curg
455 status := readgstatus(gp)
456 if status != _Grunning && status != _Gscanrunning {
457 throw("gopark: bad g status")
458 }
459 mp.waitlock = lock
460 mp.waitunlockf = unlockf
461 gp.waitreason = reason
462 mp.waitTraceBlockReason = traceReason
463 mp.waitTraceSkip = traceskip
464 releasem(mp)
465
466 mcall(park_m)
467 }
468
469
470
471 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
472 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
473 }
474
475
476
477
478
479
480
481
482
483
484
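// goready marks gp (a goroutine parked by gopark) runnable again, switching to
// the system stack to do so.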
485 func goready(gp *g, traceskip int) {
486 systemstack(func() {
487 ready(gp, traceskip, true)
488 })
489 }
490
491
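// acquireSudog returns a sudog from the per-P cache, refilling the cache from
// the global pool (or allocating a new sudog) when it is empty.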
492 func acquireSudog() *sudog {
493
494
495
496
497
498
499
500
501 mp := acquirem()
502 pp := mp.p.ptr()
503 if len(pp.sudogcache) == 0 {
504 lock(&sched.sudoglock)
505
506 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
507 s := sched.sudogcache
508 sched.sudogcache = s.next
509 s.next = nil
510 pp.sudogcache = append(pp.sudogcache, s)
511 }
512 unlock(&sched.sudoglock)
513
514 if len(pp.sudogcache) == 0 {
515 pp.sudogcache = append(pp.sudogcache, new(sudog))
516 }
517 }
518 n := len(pp.sudogcache)
519 s := pp.sudogcache[n-1]
520 pp.sudogcache[n-1] = nil
521 pp.sudogcache = pp.sudogcache[:n-1]
522 if s.elem.get() != nil {
523 throw("acquireSudog: found s.elem != nil in cache")
524 }
525 releasem(mp)
526 return s
527 }
528
529
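// releaseSudog returns s to the per-P cache, spilling half of a full cache
// back to the global pool.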
530 func releaseSudog(s *sudog) {
531 if s.elem.get() != nil {
532 throw("runtime: sudog with non-nil elem")
533 }
534 if s.isSelect {
535 throw("runtime: sudog with non-false isSelect")
536 }
537 if s.next != nil {
538 throw("runtime: sudog with non-nil next")
539 }
540 if s.prev != nil {
541 throw("runtime: sudog with non-nil prev")
542 }
543 if s.waitlink != nil {
544 throw("runtime: sudog with non-nil waitlink")
545 }
546 if s.c.get() != nil {
547 throw("runtime: sudog with non-nil c")
548 }
549 gp := getg()
550 if gp.param != nil {
551 throw("runtime: releaseSudog with non-nil gp.param")
552 }
553 mp := acquirem()
554 pp := mp.p.ptr()
555 if len(pp.sudogcache) == cap(pp.sudogcache) {
556
557 var first, last *sudog
558 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
559 n := len(pp.sudogcache)
560 p := pp.sudogcache[n-1]
561 pp.sudogcache[n-1] = nil
562 pp.sudogcache = pp.sudogcache[:n-1]
563 if first == nil {
564 first = p
565 } else {
566 last.next = p
567 }
568 last = p
569 }
570 lock(&sched.sudoglock)
571 last.next = sched.sudogcache
572 sched.sudogcache = first
573 unlock(&sched.sudoglock)
574 }
575 pp.sudogcache = append(pp.sudogcache, s)
576 releasem(mp)
577 }
578
579
580 func badmcall(fn func(*g)) {
581 throw("runtime: mcall called on m->g0 stack")
582 }
583
584 func badmcall2(fn func(*g)) {
585 throw("runtime: mcall function returned")
586 }
587
588 func badreflectcall() {
589 panic(plainError("arg size to reflect.call more than 1GB"))
590 }
591
592
593
594 func badmorestackg0() {
595 if !crashStackImplemented {
596 writeErrStr("fatal: morestack on g0\n")
597 return
598 }
599
600 g := getg()
601 switchToCrashStack(func() {
602 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
603 g.m.traceback = 2
604 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
605 print("\n")
606
607 throw("morestack on g0")
608 })
609 }
610
611
612
613 func badmorestackgsignal() {
614 writeErrStr("fatal: morestack on gsignal\n")
615 }
616
617
618 func badctxt() {
619 throw("ctxt != 0")
620 }
621
622
623
624 var gcrash g
625
626 var crashingG atomic.Pointer[g]
627
628
629
630
631
632
633
634
635
636 func switchToCrashStack(fn func()) {
637 me := getg()
638 if crashingG.CompareAndSwapNoWB(nil, me) {
639 switchToCrashStack0(fn)
640 abort()
641 }
642 if crashingG.Load() == me {
643
644 writeErrStr("fatal: recursive switchToCrashStack\n")
645 abort()
646 }
647
648 usleep_no_g(100)
649 writeErrStr("fatal: concurrent switchToCrashStack\n")
650 abort()
651 }
652
653
654
655
656 const crashStackImplemented = GOOS != "windows"
657
658
659 func switchToCrashStack0(fn func())
660
661 func lockedOSThread() bool {
662 gp := getg()
663 return gp.lockedm != 0 && gp.m.lockedg != 0
664 }
665
666 var (
667
668
669
670
671
672
673 allglock mutex
674 allgs []*g
675
676
677
678
679
680
681
682
683
684
685
686
687
688 allglen uintptr
689 allgptr **g
690 )
691
692 func allgadd(gp *g) {
693 if readgstatus(gp) == _Gidle {
694 throw("allgadd: bad status Gidle")
695 }
696
697 lock(&allglock)
698 allgs = append(allgs, gp)
699 if &allgs[0] != allgptr {
700 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
701 }
702 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
703 unlock(&allglock)
704 }
705
706
707
708
709 func allGsSnapshot() []*g {
710 assertWorldStoppedOrLockHeld(&allglock)
711
712
713
714
715
716
717 return allgs[:len(allgs):len(allgs)]
718 }
719
720
721 func atomicAllG() (**g, uintptr) {
722 length := atomic.Loaduintptr(&allglen)
723 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
724 return ptr, length
725 }
726
727
728 func atomicAllGIndex(ptr **g, i uintptr) *g {
729 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
730 }
731
732
733
734
735 func forEachG(fn func(gp *g)) {
736 lock(&allglock)
737 for _, gp := range allgs {
738 fn(gp)
739 }
740 unlock(&allglock)
741 }
742
743
744
745
746
747 func forEachGRace(fn func(gp *g)) {
748 ptr, length := atomicAllG()
749 for i := uintptr(0); i < length; i++ {
750 gp := atomicAllGIndex(ptr, i)
751 fn(gp)
752 }
753 return
754 }
755
756 const (
757
758
759 _GoidCacheBatch = 16
760 )
761
762
763
764 func cpuinit(env string) {
765 cpu.Initialize(env)
766
767
768
769 switch GOARCH {
770 case "386", "amd64":
771 x86HasAVX = cpu.X86.HasAVX
772 x86HasFMA = cpu.X86.HasFMA
773 x86HasPOPCNT = cpu.X86.HasPOPCNT
774 x86HasSSE41 = cpu.X86.HasSSE41
775
776 case "arm":
777 armHasVFPv4 = cpu.ARM.HasVFPv4
778
779 case "arm64":
780 arm64HasATOMICS = cpu.ARM64.HasATOMICS
781
782 case "loong64":
783 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
784 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
785 loong64HasLSX = cpu.Loong64.HasLSX
786
787 case "riscv64":
788 riscv64HasZbb = cpu.RISCV64.HasZbb
789 }
790 }
791
792
793
794
795
796
797 func getGodebugEarly() (string, bool) {
798 const prefix = "GODEBUG="
799 var env string
800 switch GOOS {
801 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
802
803
804
805 n := int32(0)
806 for argv_index(argv, argc+1+n) != nil {
807 n++
808 }
809
810 for i := int32(0); i < n; i++ {
811 p := argv_index(argv, argc+1+i)
812 s := unsafe.String(p, findnull(p))
813
814 if stringslite.HasPrefix(s, prefix) {
815 env = gostringnocopy(p)[len(prefix):]
816 break
817 }
818 }
819 break
820
821 default:
822 return "", false
823 }
824 return env, true
825 }
826
827
828
829
830
831
832
833
834
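// schedinit performs one-time scheduler bootstrap on the initial thread: it
// initializes runtime locks, the current M, the allocator, the GC, argument
// and environment handling, and creates the initial Ps via procresize.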
835 func schedinit() {
836 lockInit(&sched.lock, lockRankSched)
837 lockInit(&sched.sysmonlock, lockRankSysmon)
838 lockInit(&sched.deferlock, lockRankDefer)
839 lockInit(&sched.sudoglock, lockRankSudog)
840 lockInit(&deadlock, lockRankDeadlock)
841 lockInit(&paniclk, lockRankPanic)
842 lockInit(&allglock, lockRankAllg)
843 lockInit(&allpLock, lockRankAllp)
844 lockInit(&reflectOffs.lock, lockRankReflectOffs)
845 lockInit(&finlock, lockRankFin)
846 lockInit(&cpuprof.lock, lockRankCpuprof)
847 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
848 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
849 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
850 traceLockInit()
851
852
853
854 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
855
856 lockVerifyMSize()
857
858 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
859
860
861
862 gp := getg()
863 if raceenabled {
864 gp.racectx, raceprocctx0 = raceinit()
865 }
866
867 sched.maxmcount = 10000
868 crashFD.Store(^uintptr(0))
869
870
871 worldStopped()
872
873 godebug, parsedGodebug := getGodebugEarly()
874 if parsedGodebug {
875 parseRuntimeDebugVars(godebug)
876 }
877 ticks.init()
878 moduledataverify()
879 stackinit()
880 randinit()
881 mallocinit()
882 cpuinit(godebug)
883 alginit()
884 mcommoninit(gp.m, -1)
885 modulesinit()
886 typelinksinit()
887 itabsinit()
888 stkobjinit()
889
890 sigsave(&gp.m.sigmask)
891 initSigmask = gp.m.sigmask
892
893 goargs()
894 goenvs()
895 secure()
896 checkfds()
897 if !parsedGodebug {
898
899
900 parseRuntimeDebugVars(gogetenv("GODEBUG"))
901 }
902 finishDebugVarsSetup()
903 gcinit()
904
905
906
907 gcrash.stack = stackalloc(16384)
908 gcrash.stackguard0 = gcrash.stack.lo + 1000
909 gcrash.stackguard1 = gcrash.stack.lo + 1000
910
911
912
913
914
915 if disableMemoryProfiling {
916 MemProfileRate = 0
917 }
918
919
920 mProfStackInit(gp.m)
921 defaultGOMAXPROCSInit()
922
923 lock(&sched.lock)
924 sched.lastpoll.Store(nanotime())
925 var procs int32
926 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
927 procs = int32(n)
928 sched.customGOMAXPROCS = true
929 } else {
930
931
932
933
934
935
936
937
938 procs = defaultGOMAXPROCS(numCPUStartup)
939 }
940 if procresize(procs) != nil {
941 throw("unknown runnable goroutine during bootstrap")
942 }
943 unlock(&sched.lock)
944
945
946 worldStarted()
947
948 if buildVersion == "" {
949
950
951 buildVersion = "unknown"
952 }
953 if len(modinfo) == 1 {
954
955
956 modinfo = ""
957 }
958 }
959
960 func dumpgstatus(gp *g) {
961 thisg := getg()
962 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
963 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
964 }
965
966
967 func checkmcount() {
968 assertLockHeld(&sched.lock)
969
970
971
972
973
974
975
976
977
978 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
979 if count > sched.maxmcount {
980 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
981 throw("thread exhaustion")
982 }
983 }
984
985
986
987
988
989 func mReserveID() int64 {
990 assertLockHeld(&sched.lock)
991
992 if sched.mnext+1 < sched.mnext {
993 throw("runtime: thread ID overflow")
994 }
995 id := sched.mnext
996 sched.mnext++
997 checkmcount()
998 return id
999 }
1000
1001
1002 func mcommoninit(mp *m, id int64) {
1003 gp := getg()
1004
1005
1006 if gp != gp.m.g0 {
1007 callers(1, mp.createstack[:])
1008 }
1009
1010 lock(&sched.lock)
1011
1012 if id >= 0 {
1013 mp.id = id
1014 } else {
1015 mp.id = mReserveID()
1016 }
1017
1018 mp.self = newMWeakPointer(mp)
1019
1020 mrandinit(mp)
1021
1022 mpreinit(mp)
1023 if mp.gsignal != nil {
1024 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1025 }
1026
1027
1028
1029 mp.alllink = allm
1030
1031
1032
1033 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1034 unlock(&sched.lock)
1035
1036
1037 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1038 mp.cgoCallers = new(cgoCallers)
1039 }
1040 mProfStackInit(mp)
1041 }
1042
1043
1044
1045
1046
1047 func mProfStackInit(mp *m) {
1048 if debug.profstackdepth == 0 {
1049
1050
1051 return
1052 }
1053 mp.profStack = makeProfStackFP()
1054 mp.mLockProfile.stack = makeProfStackFP()
1055 }
1056
1057
1058
1059
1060 func makeProfStackFP() []uintptr {
1061
1062
1063
1064
1065
1066
1067 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1068 }
1069
1070
1071
1072 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1073
1074
1075 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1076
1077 func (mp *m) becomeSpinning() {
1078 mp.spinning = true
1079 sched.nmspinning.Add(1)
1080 sched.needspinning.Store(0)
1081 }
1082
1083
1084
1085
1086
1087
1088
1089
1090 func (mp *m) snapshotAllp() []*p {
1091 mp.allpSnapshot = allp
1092 return mp.allpSnapshot
1093 }
1094
1095
1096
1097
1098
1099
1100
1101 func (mp *m) clearAllpSnapshot() {
1102 mp.allpSnapshot = nil
1103 }
1104
1105 func (mp *m) hasCgoOnStack() bool {
1106 return mp.ncgo > 0 || mp.isextra
1107 }
1108
1109 const (
1110
1111
1112 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1113
1114
1115
1116 osHasLowResClockInt = goos.IsWindows
1117
1118
1119
1120 osHasLowResClock = osHasLowResClockInt > 0
1121 )
1122
1123
1124 func ready(gp *g, traceskip int, next bool) {
1125 status := readgstatus(gp)
1126
1127
1128 mp := acquirem()
1129 if status&^_Gscan != _Gwaiting {
1130 dumpgstatus(gp)
1131 throw("bad g->status in ready")
1132 }
1133
1134
1135 trace := traceAcquire()
1136 casgstatus(gp, _Gwaiting, _Grunnable)
1137 if trace.ok() {
1138 trace.GoUnpark(gp, traceskip)
1139 traceRelease(trace)
1140 }
1141 runqput(mp.p.ptr(), gp, next)
1142 wakep()
1143 releasem(mp)
1144 }
1145
1146
1147
1148 const freezeStopWait = 0x7fffffff
1149
1150
1151
1152 var freezing atomic.Bool
1153
1154
1155
1156
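// freezetheworld is a best-effort attempt to stop all goroutines during a
// fatal panic or throw. Unlike stopTheWorld it does not wait for everything to
// stop and there is no way to undo it.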
1157 func freezetheworld() {
1158 freezing.Store(true)
1159 if debug.dontfreezetheworld > 0 {
1160
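// With GODEBUG=dontfreezetheworld=1, leave the other Ps running so their
// goroutines remain observable by external debugging tools; just give
// everything a brief moment before continuing with the crash.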
1184 usleep(1000)
1185 return
1186 }
1187
1188
1189
1190
1191 for i := 0; i < 5; i++ {
1192
1193 sched.stopwait = freezeStopWait
1194 sched.gcwaiting.Store(true)
1195
1196 if !preemptall() {
1197 break
1198 }
1199 usleep(1000)
1200 }
1201
1202 usleep(1000)
1203 preemptall()
1204 usleep(1000)
1205 }
1206
1207
1208
1209
1210
1211 func readgstatus(gp *g) uint32 {
1212 return gp.atomicstatus.Load()
1213 }
1214
1215
1216
1217
1218
1219 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1220 success := false
1221
1222
1223 switch oldval {
1224 default:
1225 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1226 dumpgstatus(gp)
1227 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1228 case _Gscanrunnable,
1229 _Gscanwaiting,
1230 _Gscanrunning,
1231 _Gscansyscall,
1232 _Gscanleaked,
1233 _Gscanpreempted,
1234 _Gscandeadextra:
1235 if newval == oldval&^_Gscan {
1236 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1237 }
1238 }
1239 if !success {
1240 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1241 dumpgstatus(gp)
1242 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1243 }
1244 releaseLockRankAndM(lockRankGscan)
1245 }
1246
1247
1248
1249 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1250 switch oldval {
1251 case _Grunnable,
1252 _Grunning,
1253 _Gwaiting,
1254 _Gleaked,
1255 _Gsyscall,
1256 _Gdeadextra:
1257 if newval == oldval|_Gscan {
1258 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1259 if r {
1260 acquireLockRankAndM(lockRankGscan)
1261 }
1262 return r
1263
1264 }
1265 }
1266 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1267 throw("bad oldval passed to castogscanstatus")
1268 return false
1269 }
1270
1271
1272
1273 var casgstatusAlwaysTrack = false
1274
1275
1276
1277
1278
1279
1280
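// casgstatus transitions gp's status from oldval to newval, spinning (and
// eventually yielding) while the status is temporarily held in a _Gscan state.
// It also maintains the scheduler's latency-tracking statistics.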
1281 func casgstatus(gp *g, oldval, newval uint32) {
1282 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1283 systemstack(func() {
1284
1285
1286 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1287 throw("casgstatus: bad incoming values")
1288 })
1289 }
1290
1291 lockWithRankMayAcquire(nil, lockRankGscan)
1292
1293
1294 const yieldDelay = 5 * 1000
1295 var nextYield int64
1296
1297
1298
1299 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1300 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1301 systemstack(func() {
1302
1303
1304 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1305 })
1306 }
1307 if i == 0 {
1308 nextYield = nanotime() + yieldDelay
1309 }
1310 if nanotime() < nextYield {
1311 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1312 procyield(1)
1313 }
1314 } else {
1315 osyield()
1316 nextYield = nanotime() + yieldDelay/2
1317 }
1318 }
1319
1320 if gp.bubble != nil {
1321 systemstack(func() {
1322 gp.bubble.changegstatus(gp, oldval, newval)
1323 })
1324 }
1325
1326 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1327
1328
1329 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1330 gp.tracking = true
1331 }
1332 gp.trackingSeq++
1333 }
1334 if !gp.tracking {
1335 return
1336 }
1337
1338
1339
1340
1341
1342
1343 switch oldval {
1344 case _Grunnable:
1345
1346
1347
1348 now := nanotime()
1349 gp.runnableTime += now - gp.trackingStamp
1350 gp.trackingStamp = 0
1351 case _Gwaiting:
1352 if !gp.waitreason.isMutexWait() {
1353
1354 break
1355 }
1356
1357
1358
1359
1360
1361 now := nanotime()
1362 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1363 gp.trackingStamp = 0
1364 }
1365 switch newval {
1366 case _Gwaiting:
1367 if !gp.waitreason.isMutexWait() {
1368
1369 break
1370 }
1371
1372 now := nanotime()
1373 gp.trackingStamp = now
1374 case _Grunnable:
1375
1376
1377 now := nanotime()
1378 gp.trackingStamp = now
1379 case _Grunning:
1380
1381
1382
1383 gp.tracking = false
1384 sched.timeToRun.record(gp.runnableTime)
1385 gp.runnableTime = 0
1386 }
1387 }
1388
1389
1390
1391
1392 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1393
1394 gp.waitreason = reason
1395 casgstatus(gp, old, _Gwaiting)
1396 }
1397
1398
1399
1400
1401
1402
1403
1404
1405 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1406 if !reason.isWaitingForSuspendG() {
1407 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1408 }
1409 casGToWaiting(gp, old, reason)
1410 }
1411
1412
1413
1414
1415
1416 func casGToPreemptScan(gp *g, old, new uint32) {
1417 if old != _Grunning || new != _Gscan|_Gpreempted {
1418 throw("bad g transition")
1419 }
1420 acquireLockRankAndM(lockRankGscan)
1421 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1422 }
1423
1424
1425
1426
1427
1428
1429 }
1430
1431
1432
1433
1434 func casGFromPreempted(gp *g, old, new uint32) bool {
1435 if old != _Gpreempted || new != _Gwaiting {
1436 throw("bad g transition")
1437 }
1438 gp.waitreason = waitReasonPreempted
1439 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1440 return false
1441 }
1442 if bubble := gp.bubble; bubble != nil {
1443 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1444 }
1445 return true
1446 }
1447
1448
1449 type stwReason uint8
1450
1451
1452
1453
1454 const (
1455 stwUnknown stwReason = iota
1456 stwGCMarkTerm
1457 stwGCSweepTerm
1458 stwWriteHeapDump
1459 stwGoroutineProfile
1460 stwGoroutineProfileCleanup
1461 stwAllGoroutinesStack
1462 stwReadMemStats
1463 stwAllThreadsSyscall
1464 stwGOMAXPROCS
1465 stwStartTrace
1466 stwStopTrace
1467 stwForTestCountPagesInUse
1468 stwForTestReadMetricsSlow
1469 stwForTestReadMemStatsSlow
1470 stwForTestPageCachePagesLeaked
1471 stwForTestResetDebugLog
1472 )
1473
1474 func (r stwReason) String() string {
1475 return stwReasonStrings[r]
1476 }
1477
1478 func (r stwReason) isGC() bool {
1479 return r == stwGCMarkTerm || r == stwGCSweepTerm
1480 }
1481
1482
1483
1484
1485 var stwReasonStrings = [...]string{
1486 stwUnknown: "unknown",
1487 stwGCMarkTerm: "GC mark termination",
1488 stwGCSweepTerm: "GC sweep termination",
1489 stwWriteHeapDump: "write heap dump",
1490 stwGoroutineProfile: "goroutine profile",
1491 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1492 stwAllGoroutinesStack: "all goroutines stack trace",
1493 stwReadMemStats: "read mem stats",
1494 stwAllThreadsSyscall: "AllThreadsSyscall",
1495 stwGOMAXPROCS: "GOMAXPROCS",
1496 stwStartTrace: "start trace",
1497 stwStopTrace: "stop trace",
1498 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1499 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1500 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1501 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1502 stwForTestResetDebugLog: "ResetDebugLog (test)",
1503 }
1504
1505
1506
1507 type worldStop struct {
1508 reason stwReason
1509 startedStopping int64
1510 finishedStopping int64
1511 stoppingCPUTime int64
1512 }
1513
1514
1515
1516
1517 var stopTheWorldContext worldStop
1518
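// stopTheWorld stops all Ps from executing goroutines and returns once the
// world is stopped. It acquires worldsema; the caller must later call
// startTheWorld with the returned worldStop to resume execution and release it.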
1536 func stopTheWorld(reason stwReason) worldStop {
1537 semacquire(&worldsema)
1538 gp := getg()
1539 gp.m.preemptoff = reason.String()
1540 systemstack(func() {
1541 stopTheWorldContext = stopTheWorldWithSema(reason)
1542 })
1543 return stopTheWorldContext
1544 }
1545
1546
1547
1548
1549 func startTheWorld(w worldStop) {
1550 systemstack(func() { startTheWorldWithSema(0, w) })
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567 mp := acquirem()
1568 mp.preemptoff = ""
1569 semrelease1(&worldsema, true, 0)
1570 releasem(mp)
1571 }
1572
1573
1574
1575
1576 func stopTheWorldGC(reason stwReason) worldStop {
1577 semacquire(&gcsema)
1578 return stopTheWorld(reason)
1579 }
1580
1581
1582
1583
1584 func startTheWorldGC(w worldStop) {
1585 startTheWorld(w)
1586 semrelease(&gcsema)
1587 }
1588
1589
1590 var worldsema uint32 = 1
1591
1592
1593
1594
1595
1596
1597
1598 var gcsema uint32 = 1
1599
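// stopTheWorldWithSema is the core of stopTheWorld. The caller must hold
// worldsema, must not hold any other runtime locks, and should invoke it on
// the system stack. It returns timing information consumed by
// startTheWorldWithSema.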
1632 func stopTheWorldWithSema(reason stwReason) worldStop {
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1646
1647 trace := traceAcquire()
1648 if trace.ok() {
1649 trace.STWStart(reason)
1650 traceRelease(trace)
1651 }
1652 gp := getg()
1653
1654
1655
1656 if gp.m.locks > 0 {
1657 throw("stopTheWorld: holding locks")
1658 }
1659
1660 lock(&sched.lock)
1661 start := nanotime()
1662 sched.stopwait = gomaxprocs
1663 sched.gcwaiting.Store(true)
1664 preemptall()
1665
1666
1667 gp.m.p.ptr().status = _Pgcstop
1668 gp.m.p.ptr().gcStopTime = start
1669 sched.stopwait--
1670
1671
1672 for _, pp := range allp {
1673 if thread, ok := setBlockOnExitSyscall(pp); ok {
1674 thread.gcstopP()
1675 thread.resume()
1676 }
1677 }
1678
1679
1680 now := nanotime()
1681 for {
1682 pp, _ := pidleget(now)
1683 if pp == nil {
1684 break
1685 }
1686 pp.status = _Pgcstop
1687 pp.gcStopTime = nanotime()
1688 sched.stopwait--
1689 }
1690 wait := sched.stopwait > 0
1691 unlock(&sched.lock)
1692
1693
1694 if wait {
1695 for {
1696
1697 if notetsleep(&sched.stopnote, 100*1000) {
1698 noteclear(&sched.stopnote)
1699 break
1700 }
1701 preemptall()
1702 }
1703 }
1704
1705 finish := nanotime()
1706 startTime := finish - start
1707 if reason.isGC() {
1708 sched.stwStoppingTimeGC.record(startTime)
1709 } else {
1710 sched.stwStoppingTimeOther.record(startTime)
1711 }
1712
1713
1714
1715
1716
1717 stoppingCPUTime := int64(0)
1718 bad := ""
1719 if sched.stopwait != 0 {
1720 bad = "stopTheWorld: not stopped (stopwait != 0)"
1721 } else {
1722 for _, pp := range allp {
1723 if pp.status != _Pgcstop {
1724 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1725 }
1726 if pp.gcStopTime == 0 && bad == "" {
1727 bad = "stopTheWorld: broken CPU time accounting"
1728 }
1729 stoppingCPUTime += finish - pp.gcStopTime
1730 pp.gcStopTime = 0
1731 }
1732 }
1733 if freezing.Load() {
1734
1735
1736
1737
1738 lock(&deadlock)
1739 lock(&deadlock)
1740 }
1741 if bad != "" {
1742 throw(bad)
1743 }
1744
1745 worldStopped()
1746
1747
1748 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1749
1750 return worldStop{
1751 reason: reason,
1752 startedStopping: start,
1753 finishedStopping: finish,
1754 stoppingCPUTime: stoppingCPUTime,
1755 }
1756 }
1757
1758
1759
1760
1761
1762
1763
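// startTheWorldWithSema undoes stopTheWorldWithSema: it resizes and restarts
// the Ps, wakes or creates Ms to run them, and returns the time at which the
// world was restarted.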
1764 func startTheWorldWithSema(now int64, w worldStop) int64 {
1765 assertWorldStopped()
1766
1767 mp := acquirem()
1768 if netpollinited() {
1769 list, delta := netpoll(0)
1770 injectglist(&list)
1771 netpollAdjustWaiters(delta)
1772 }
1773 lock(&sched.lock)
1774
1775 procs := gomaxprocs
1776 if newprocs != 0 {
1777 procs = newprocs
1778 newprocs = 0
1779 }
1780 p1 := procresize(procs)
1781 sched.gcwaiting.Store(false)
1782 if sched.sysmonwait.Load() {
1783 sched.sysmonwait.Store(false)
1784 notewakeup(&sched.sysmonnote)
1785 }
1786 unlock(&sched.lock)
1787
1788 worldStarted()
1789
1790 for p1 != nil {
1791 p := p1
1792 p1 = p1.link.ptr()
1793 if p.m != 0 {
1794 mp := p.m.ptr()
1795 p.m = 0
1796 if mp.nextp != 0 {
1797 throw("startTheWorld: inconsistent mp->nextp")
1798 }
1799 mp.nextp.set(p)
1800 notewakeup(&mp.park)
1801 } else {
1802
1803 newm(nil, p, -1)
1804 }
1805 }
1806
1807
1808 if now == 0 {
1809 now = nanotime()
1810 }
1811 totalTime := now - w.startedStopping
1812 if w.reason.isGC() {
1813 sched.stwTotalTimeGC.record(totalTime)
1814 } else {
1815 sched.stwTotalTimeOther.record(totalTime)
1816 }
1817 trace := traceAcquire()
1818 if trace.ok() {
1819 trace.STWDone()
1820 traceRelease(trace)
1821 }
1822
1823
1824
1825
1826 wakep()
1827
1828 releasem(mp)
1829
1830 return now
1831 }
1832
1833
1834
1835 func usesLibcall() bool {
1836 switch GOOS {
1837 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1838 return true
1839 }
1840 return false
1841 }
1842
1843
1844
1845 func mStackIsSystemAllocated() bool {
1846 switch GOOS {
1847 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1848 return true
1849 }
1850 return false
1851 }
1852
1853
1854
1855 func mstart()
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
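// mstart0 is the Go entry point for new Ms. It establishes the g0 stack
// bounds, calls mstart1 to finish initialization and enter the scheduler, and
// tears the thread down with mexit when it comes back.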
1866 func mstart0() {
1867 gp := getg()
1868
1869 osStack := gp.stack.lo == 0
1870 if osStack {
1871
1872
1873
1874
1875
1876
1877
1878
1879 size := gp.stack.hi
1880 if size == 0 {
1881 size = 16384 * sys.StackGuardMultiplier
1882 }
1883 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1884 gp.stack.lo = gp.stack.hi - size + 1024
1885 }
1886
1887
1888 gp.stackguard0 = gp.stack.lo + stackGuard
1889
1890
1891 gp.stackguard1 = gp.stackguard0
1892 mstart1()
1893
1894
1895 if mStackIsSystemAllocated() {
1896
1897
1898
1899 osStack = true
1900 }
1901 mexit(osStack)
1902 }
1903
1904
1905
1906
1907
1908 func mstart1() {
1909 gp := getg()
1910
1911 if gp != gp.m.g0 {
1912 throw("bad runtime·mstart")
1913 }
1914
1915
1916
1917
1918
1919
1920
1921 gp.sched.g = guintptr(unsafe.Pointer(gp))
1922 gp.sched.pc = sys.GetCallerPC()
1923 gp.sched.sp = sys.GetCallerSP()
1924
1925 asminit()
1926 minit()
1927
1928
1929
1930 if gp.m == &m0 {
1931 mstartm0()
1932 }
1933
1934 if debug.dataindependenttiming == 1 {
1935 sys.EnableDIT()
1936 }
1937
1938 if fn := gp.m.mstartfn; fn != nil {
1939 fn()
1940 }
1941
1942 if gp.m != &m0 {
1943 acquirep(gp.m.nextp.ptr())
1944 gp.m.nextp = 0
1945 }
1946 schedule()
1947 }
1948
1949
1950
1951
1952
1953
1954
1955 func mstartm0() {
1956
1957
1958
1959 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1960 cgoHasExtraM = true
1961 newextram()
1962 }
1963 initsig(false)
1964 }
1965
1966
1967
1968
1969 func mPark() {
1970 gp := getg()
1971 notesleep(&gp.m.park)
1972 noteclear(&gp.m.park)
1973 }
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
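// mexit tears down the current OS thread. If osStack is true the thread's
// stack is owned by the operating system and mexit returns so the caller can
// exit the thread; otherwise it arranges for the g0 stack to be freed later
// and calls exitThread itself.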
1985 func mexit(osStack bool) {
1986 mp := getg().m
1987
1988 if mp == &m0 {
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000 handoffp(releasep())
2001 lock(&sched.lock)
2002 sched.nmfreed++
2003 checkdead()
2004 unlock(&sched.lock)
2005 mPark()
2006 throw("locked m0 woke up")
2007 }
2008
2009 sigblock(true)
2010 unminit()
2011
2012
2013 if mp.gsignal != nil {
2014 stackfree(mp.gsignal.stack)
2015 if valgrindenabled {
2016 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2017 mp.gsignal.valgrindStackID = 0
2018 }
2019
2020
2021
2022
2023 mp.gsignal = nil
2024 }
2025
2026
2027 vgetrandomDestroy(mp)
2028
2029
2030
2031 mp.self.clear()
2032
2033
2034 lock(&sched.lock)
2035 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2036 if *pprev == mp {
2037 *pprev = mp.alllink
2038 goto found
2039 }
2040 }
2041 throw("m not found in allm")
2042 found:
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057 mp.freeWait.Store(freeMWait)
2058 mp.freelink = sched.freem
2059 sched.freem = mp
2060 unlock(&sched.lock)
2061
2062 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2063 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2064
2065
2066 handoffp(releasep())
2067
2068
2069
2070
2071
2072 lock(&sched.lock)
2073 sched.nmfreed++
2074 checkdead()
2075 unlock(&sched.lock)
2076
2077 if GOOS == "darwin" || GOOS == "ios" {
2078
2079
2080 if mp.signalPending.Load() != 0 {
2081 pendingPreemptSignals.Add(-1)
2082 }
2083 }
2084
2085
2086
2087 mdestroy(mp)
2088
2089 if osStack {
2090
2091 mp.freeWait.Store(freeMRef)
2092
2093
2094
2095 return
2096 }
2097
2098
2099
2100
2101
2102 exitThread(&mp.freeWait)
2103 }
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
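// forEachP calls fn(p) for every P at a GC safe point without stopping the
// world. fn may run on the current P directly, on idle Ps, or on other Ps at
// their next safe point; forEachP returns only after every P has run fn.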
2115 func forEachP(reason waitReason, fn func(*p)) {
2116 systemstack(func() {
2117 gp := getg().m.curg
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129 casGToWaitingForSuspendG(gp, _Grunning, reason)
2130 forEachPInternal(fn)
2131 casgstatus(gp, _Gwaiting, _Grunning)
2132 })
2133 }
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144 func forEachPInternal(fn func(*p)) {
2145 mp := acquirem()
2146 pp := getg().m.p.ptr()
2147
2148 lock(&sched.lock)
2149 if sched.safePointWait != 0 {
2150 throw("forEachP: sched.safePointWait != 0")
2151 }
2152 sched.safePointWait = gomaxprocs - 1
2153 sched.safePointFn = fn
2154
2155
2156 for _, p2 := range allp {
2157 if p2 != pp {
2158 atomic.Store(&p2.runSafePointFn, 1)
2159 }
2160 }
2161 preemptall()
2162
2163
2164
2165
2166
2167
2168
2169 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2170 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2171 fn(p)
2172 sched.safePointWait--
2173 }
2174 }
2175
2176 wait := sched.safePointWait > 0
2177 unlock(&sched.lock)
2178
2179
2180 fn(pp)
2181
2182
2183
2184 for _, p2 := range allp {
2185 if atomic.Load(&p2.runSafePointFn) != 1 {
2186
2187 continue
2188 }
2189 if thread, ok := setBlockOnExitSyscall(p2); ok {
2190 thread.takeP()
2191 thread.resume()
2192 handoffp(p2)
2193 }
2194 }
2195
2196
2197 if wait {
2198 for {
2199
2200
2201
2202
2203 if notetsleep(&sched.safePointNote, 100*1000) {
2204 noteclear(&sched.safePointNote)
2205 break
2206 }
2207 preemptall()
2208 }
2209 }
2210 if sched.safePointWait != 0 {
2211 throw("forEachP: not done")
2212 }
2213 for _, p2 := range allp {
2214 if p2.runSafePointFn != 0 {
2215 throw("forEachP: P did not run fn")
2216 }
2217 }
2218
2219 lock(&sched.lock)
2220 sched.safePointFn = nil
2221 unlock(&sched.lock)
2222 releasem(mp)
2223 }
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236 func runSafePointFn() {
2237 p := getg().m.p.ptr()
2238
2239
2240
2241 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2242 return
2243 }
2244 sched.safePointFn(p)
2245 lock(&sched.lock)
2246 sched.safePointWait--
2247 if sched.safePointWait == 0 {
2248 notewakeup(&sched.safePointNote)
2249 }
2250 unlock(&sched.lock)
2251 }
2252
2253
2254
2255
2256 var cgoThreadStart unsafe.Pointer
2257
2258 type cgothreadstart struct {
2259 g guintptr
2260 tls *uint64
2261 fn unsafe.Pointer
2262 }
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
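// allocm allocates a new m unassociated with any OS thread, using pp for
// allocation context if the caller has no P. fn is recorded as the new m's
// mstartfn and id, if non-negative, becomes its ID. It also frees the g0
// stacks of Ms on sched.freem that have finished exiting.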
2273 func allocm(pp *p, fn func(), id int64) *m {
2274 allocmLock.rlock()
2275
2276
2277
2278
2279 acquirem()
2280
2281 gp := getg()
2282 if gp.m.p == 0 {
2283 acquirep(pp)
2284 }
2285
2286
2287
2288 if sched.freem != nil {
2289 lock(&sched.lock)
2290 var newList *m
2291 for freem := sched.freem; freem != nil; {
2292
2293 wait := freem.freeWait.Load()
2294 if wait == freeMWait {
2295 next := freem.freelink
2296 freem.freelink = newList
2297 newList = freem
2298 freem = next
2299 continue
2300 }
2301
2302
2303
2304 if traceEnabled() || traceShuttingDown() {
2305 traceThreadDestroy(freem)
2306 }
2307
2308
2309
2310 if wait == freeMStack {
2311
2312
2313
2314 systemstack(func() {
2315 stackfree(freem.g0.stack)
2316 if valgrindenabled {
2317 valgrindDeregisterStack(freem.g0.valgrindStackID)
2318 freem.g0.valgrindStackID = 0
2319 }
2320 })
2321 }
2322 freem = freem.freelink
2323 }
2324 sched.freem = newList
2325 unlock(&sched.lock)
2326 }
2327
2328 mp := &new(mPadded).m
2329 mp.mstartfn = fn
2330 mcommoninit(mp, id)
2331
2332
2333
2334 if iscgo || mStackIsSystemAllocated() {
2335 mp.g0 = malg(-1)
2336 } else {
2337 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2338 }
2339 mp.g0.m = mp
2340
2341 if pp == gp.m.p.ptr() {
2342 releasep()
2343 }
2344
2345 releasem(gp.m)
2346 allocmLock.runlock()
2347 return mp
2348 }
2349
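// needm is called when a cgo callback or signal arrives on a thread not
// created by the Go runtime. It takes an M from the extra-M list and installs
// it on the current thread so Go code can run; dropm undoes this. signal
// reports whether the call is on behalf of a signal handler.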
2389 func needm(signal bool) {
2390 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2391
2392
2393
2394
2395
2396
2397 writeErrStr("fatal error: cgo callback before cgo call\n")
2398 exit(1)
2399 }
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409 var sigmask sigset
2410 sigsave(&sigmask)
2411 sigblock(false)
2412
2413
2414
2415
2416 mp, last := getExtraM()
2417
2418
2419
2420
2421
2422
2423
2424
2425 mp.needextram = last
2426
2427
2428 mp.sigmask = sigmask
2429
2430
2431
2432 osSetupTLS(mp)
2433
2434
2435
2436 setg(mp.g0)
2437 sp := sys.GetCallerSP()
2438 callbackUpdateSystemStack(mp, sp, signal)
2439
2440
2441
2442
2443 mp.isExtraInC = false
2444
2445
2446 asminit()
2447 minit()
2448
2449
2450
2451
2452
2453
2454 var trace traceLocker
2455 if !signal {
2456 trace = traceAcquire()
2457 }
2458
2459
2460 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2461 sched.ngsys.Add(-1)
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471 addGSyscallNoP(mp)
2472
2473 if !signal {
2474 if trace.ok() {
2475 trace.GoCreateSyscall(mp.curg)
2476 traceRelease(trace)
2477 }
2478 }
2479 mp.isExtraInSig = signal
2480 }
2481
2482
2483
2484
2485 func needAndBindM() {
2486 needm(false)
2487
2488 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2489 cgoBindM()
2490 }
2491 }
2492
2493
2494
2495
2496 func newextram() {
2497 c := extraMWaiters.Swap(0)
2498 if c > 0 {
2499 for i := uint32(0); i < c; i++ {
2500 oneNewExtraM()
2501 }
2502 } else if extraMLength.Load() == 0 {
2503
2504 oneNewExtraM()
2505 }
2506 }
2507
2508
2509 func oneNewExtraM() {
2510
2511
2512
2513
2514
2515 mp := allocm(nil, nil, -1)
2516 gp := malg(4096)
2517 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2518 gp.sched.sp = gp.stack.hi
2519 gp.sched.sp -= 4 * goarch.PtrSize
2520 gp.sched.lr = 0
2521 gp.sched.g = guintptr(unsafe.Pointer(gp))
2522 gp.syscallpc = gp.sched.pc
2523 gp.syscallsp = gp.sched.sp
2524 gp.stktopsp = gp.sched.sp
2525
2526
2527
2528 casgstatus(gp, _Gidle, _Gdeadextra)
2529 gp.m = mp
2530 mp.curg = gp
2531 mp.isextra = true
2532
2533 mp.isExtraInC = true
2534 mp.lockedInt++
2535 mp.lockedg.set(gp)
2536 gp.lockedm.set(mp)
2537 gp.goid = sched.goidgen.Add(1)
2538 if raceenabled {
2539 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2540 }
2541
2542 allgadd(gp)
2543
2544
2545
2546
2547
2548 sched.ngsys.Add(1)
2549
2550
2551 addExtraM(mp)
2552 }
2553
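// dropm releases the extra M acquired by needm: it puts the M back on the
// extra-M list and restores the thread's original signal mask so the thread
// can safely return to C.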
2587 func dropm() {
2588
2589
2590
2591 mp := getg().m
2592
2593
2594
2595
2596
2597 var trace traceLocker
2598 if !mp.isExtraInSig {
2599 trace = traceAcquire()
2600 }
2601
2602
2603 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2604 mp.curg.preemptStop = false
2605 sched.ngsys.Add(1)
2606 decGSyscallNoP(mp)
2607
2608 if !mp.isExtraInSig {
2609 if trace.ok() {
2610 trace.GoDestroySyscall()
2611 traceRelease(trace)
2612 }
2613 }
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628 mp.syscalltick--
2629
2630
2631
2632 mp.curg.trace.reset()
2633
2634
2635
2636
2637 if traceEnabled() || traceShuttingDown() {
2638
2639
2640
2641
2642
2643
2644
2645 lock(&sched.lock)
2646 traceThreadDestroy(mp)
2647 unlock(&sched.lock)
2648 }
2649 mp.isExtraInSig = false
2650
2651
2652
2653
2654
2655 sigmask := mp.sigmask
2656 sigblock(false)
2657 unminit()
2658
2659 setg(nil)
2660
2661
2662
2663 g0 := mp.g0
2664 g0.stack.hi = 0
2665 g0.stack.lo = 0
2666 g0.stackguard0 = 0
2667 g0.stackguard1 = 0
2668 mp.g0StackAccurate = false
2669
2670 putExtraM(mp)
2671
2672 msigrestore(sigmask)
2673 }
2674
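// cgoBindM binds the current M to the current C thread by storing its g0 in a
// C thread-local slot via _cgo_bindm, so later callbacks on this thread reuse
// the same M.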
2695 func cgoBindM() {
2696 if GOOS == "windows" || GOOS == "plan9" {
2697 fatal("bindm in unexpected GOOS")
2698 }
2699 g := getg()
2700 if g.m.g0 != g {
2701 fatal("the current g is not g0")
2702 }
2703 if _cgo_bindm != nil {
2704 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2705 }
2706 }
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719 func getm() uintptr {
2720 return uintptr(unsafe.Pointer(getg().m))
2721 }
2722
2723 var (
2724
2725
2726
2727
2728
2729
2730 extraM atomic.Uintptr
2731
2732 extraMLength atomic.Uint32
2733
2734 extraMWaiters atomic.Uint32
2735
2736
2737 extraMInUse atomic.Uint32
2738 )
2739
2740
2741
2742
2743
2744
2745
2746
2747 func lockextra(nilokay bool) *m {
2748 const locked = 1
2749
2750 incr := false
2751 for {
2752 old := extraM.Load()
2753 if old == locked {
2754 osyield_no_g()
2755 continue
2756 }
2757 if old == 0 && !nilokay {
2758 if !incr {
2759
2760
2761
2762 extraMWaiters.Add(1)
2763 incr = true
2764 }
2765 usleep_no_g(1)
2766 continue
2767 }
2768 if extraM.CompareAndSwap(old, locked) {
2769 return (*m)(unsafe.Pointer(old))
2770 }
2771 osyield_no_g()
2772 continue
2773 }
2774 }
2775
2776
2777 func unlockextra(mp *m, delta int32) {
2778 extraMLength.Add(delta)
2779 extraM.Store(uintptr(unsafe.Pointer(mp)))
2780 }
2781
2782
2783
2784
2785
2786
2787
2788
2789 func getExtraM() (mp *m, last bool) {
2790 mp = lockextra(false)
2791 extraMInUse.Add(1)
2792 unlockextra(mp.schedlink.ptr(), -1)
2793 return mp, mp.schedlink.ptr() == nil
2794 }
2795
2796
2797
2798
2799
2800 func putExtraM(mp *m) {
2801 extraMInUse.Add(-1)
2802 addExtraM(mp)
2803 }
2804
2805
2806
2807
2808 func addExtraM(mp *m) {
2809 mnext := lockextra(true)
2810 mp.schedlink.set(mnext)
2811 unlockextra(mp, 1)
2812 }
2813
2814 var (
2815
2816
2817
2818 allocmLock rwmutex
2819
2820
2821
2822
2823 execLock rwmutex
2824 )
2825
2826
2827
2828 const (
2829 failthreadcreate = "runtime: failed to create new OS thread\n"
2830 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2831 )
2832
2833
2834
2835
2836 var newmHandoff struct {
2837 lock mutex
2838
2839
2840
2841 newm muintptr
2842
2843
2844
2845 waiting bool
2846 wake note
2847
2848
2849
2850
2851 haveTemplateThread uint32
2852 }
2853
2854
2855
2856
2857
2858
2859
2860
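// newm creates a new M and an OS thread to run it, starting with fn (or the
// scheduler if fn is nil) and pp as the P to attach. If the calling thread is
// locked to a goroutine or running a cgo callback, thread creation is handed
// off to the template thread so the new thread inherits a clean state.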
2861 func newm(fn func(), pp *p, id int64) {
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872 acquirem()
2873
2874 mp := allocm(pp, fn, id)
2875 mp.nextp.set(pp)
2876 mp.sigmask = initSigmask
2877 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889 lock(&newmHandoff.lock)
2890 if newmHandoff.haveTemplateThread == 0 {
2891 throw("on a locked thread with no template thread")
2892 }
2893 mp.schedlink = newmHandoff.newm
2894 newmHandoff.newm.set(mp)
2895 if newmHandoff.waiting {
2896 newmHandoff.waiting = false
2897 notewakeup(&newmHandoff.wake)
2898 }
2899 unlock(&newmHandoff.lock)
2900
2901
2902
2903 releasem(getg().m)
2904 return
2905 }
2906 newm1(mp)
2907 releasem(getg().m)
2908 }
2909
2910 func newm1(mp *m) {
2911 if iscgo {
2912 var ts cgothreadstart
2913 if _cgo_thread_start == nil {
2914 throw("_cgo_thread_start missing")
2915 }
2916 ts.g.set(mp.g0)
2917 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2918 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2919 if msanenabled {
2920 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2921 }
2922 if asanenabled {
2923 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2924 }
2925 execLock.rlock()
2926 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2927 execLock.runlock()
2928 return
2929 }
2930 execLock.rlock()
2931 newosproc(mp)
2932 execLock.runlock()
2933 }
2934
2935
2936
2937
2938
2939 func startTemplateThread() {
2940 if GOARCH == "wasm" {
2941 return
2942 }
2943
2944
2945
2946 mp := acquirem()
2947 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2948 releasem(mp)
2949 return
2950 }
2951 newm(templateThread, nil, -1)
2952 releasem(mp)
2953 }
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967 func templateThread() {
2968 lock(&sched.lock)
2969 sched.nmsys++
2970 checkdead()
2971 unlock(&sched.lock)
2972
2973 for {
2974 lock(&newmHandoff.lock)
2975 for newmHandoff.newm != 0 {
2976 newm := newmHandoff.newm.ptr()
2977 newmHandoff.newm = 0
2978 unlock(&newmHandoff.lock)
2979 for newm != nil {
2980 next := newm.schedlink.ptr()
2981 newm.schedlink = 0
2982 newm1(newm)
2983 newm = next
2984 }
2985 lock(&newmHandoff.lock)
2986 }
2987 newmHandoff.waiting = true
2988 noteclear(&newmHandoff.wake)
2989 unlock(&newmHandoff.lock)
2990 notesleep(&newmHandoff.wake)
2991 }
2992 }
2993
2994
2995
2996 func stopm() {
2997 gp := getg()
2998
2999 if gp.m.locks != 0 {
3000 throw("stopm holding locks")
3001 }
3002 if gp.m.p != 0 {
3003 throw("stopm holding p")
3004 }
3005 if gp.m.spinning {
3006 throw("stopm spinning")
3007 }
3008
3009 lock(&sched.lock)
3010 mput(gp.m)
3011 unlock(&sched.lock)
3012 mPark()
3013 acquirep(gp.m.nextp.ptr())
3014 gp.m.nextp = 0
3015 }
3016
3017 func mspinning() {
3018
3019 getg().m.spinning = true
3020 }
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
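// startm schedules some M to run p, creating an M if necessary. If pp is nil
// it tries to grab an idle P, and does nothing if none is available. spinning
// indicates that the new M should start out looking for work; lockheld
// indicates that the caller already holds sched.lock.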
3039 func startm(pp *p, spinning, lockheld bool) {
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056 mp := acquirem()
3057 if !lockheld {
3058 lock(&sched.lock)
3059 }
3060 if pp == nil {
3061 if spinning {
3062
3063
3064
3065 throw("startm: P required for spinning=true")
3066 }
3067 pp, _ = pidleget(0)
3068 if pp == nil {
3069 if !lockheld {
3070 unlock(&sched.lock)
3071 }
3072 releasem(mp)
3073 return
3074 }
3075 }
3076 nmp := mget()
3077 if nmp == nil {
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092 id := mReserveID()
3093 unlock(&sched.lock)
3094
3095 var fn func()
3096 if spinning {
3097
3098 fn = mspinning
3099 }
3100 newm(fn, pp, id)
3101
3102 if lockheld {
3103 lock(&sched.lock)
3104 }
3105
3106
3107 releasem(mp)
3108 return
3109 }
3110 if !lockheld {
3111 unlock(&sched.lock)
3112 }
3113 if nmp.spinning {
3114 throw("startm: m is spinning")
3115 }
3116 if nmp.nextp != 0 {
3117 throw("startm: m has p")
3118 }
3119 if spinning && !runqempty(pp) {
3120 throw("startm: p has runnable gs")
3121 }
3122
3123 nmp.spinning = spinning
3124 nmp.nextp.set(pp)
3125 notewakeup(&nmp.park)
3126
3127
3128 releasem(mp)
3129 }
3130
3131
3132
3133
3134
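// handoffp hands off P from a syscall or locked M: it starts an M to run the P
// if there is work to do, and otherwise puts the P on the idle list.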
3135 func handoffp(pp *p) {
3136
3137
3138
3139
3140 if !runqempty(pp) || !sched.runq.empty() {
3141 startm(pp, false, false)
3142 return
3143 }
3144
3145 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3146 startm(pp, false, false)
3147 return
3148 }
3149
3150 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3151 startm(pp, false, false)
3152 return
3153 }
3154
3155
3156 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3157 sched.needspinning.Store(0)
3158 startm(pp, true, false)
3159 return
3160 }
3161 lock(&sched.lock)
3162 if sched.gcwaiting.Load() {
3163 pp.status = _Pgcstop
3164 pp.gcStopTime = nanotime()
3165 sched.stopwait--
3166 if sched.stopwait == 0 {
3167 notewakeup(&sched.stopnote)
3168 }
3169 unlock(&sched.lock)
3170 return
3171 }
3172 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3173 sched.safePointFn(pp)
3174 sched.safePointWait--
3175 if sched.safePointWait == 0 {
3176 notewakeup(&sched.safePointNote)
3177 }
3178 }
3179 if !sched.runq.empty() {
3180 unlock(&sched.lock)
3181 startm(pp, false, false)
3182 return
3183 }
3184
3185
3186 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3187 unlock(&sched.lock)
3188 startm(pp, false, false)
3189 return
3190 }
3191
3192
3193
3194 when := pp.timers.wakeTime()
3195 pidleput(pp, 0)
3196 unlock(&sched.lock)
3197
3198 if when != 0 {
3199 wakeNetPoller(when)
3200 }
3201 }
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
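// wakep tries to add one more P to execute goroutines: if no M is currently
// spinning, it takes an idle P and starts a spinning M for it. It is called
// when a goroutine is made runnable.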
3216 func wakep() {
3217
3218
3219 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3220 return
3221 }
3222
3223
3224
3225
3226
3227
3228 mp := acquirem()
3229
3230 var pp *p
3231 lock(&sched.lock)
3232 pp, _ = pidlegetSpinning(0)
3233 if pp == nil {
3234 if sched.nmspinning.Add(-1) < 0 {
3235 throw("wakep: negative nmspinning")
3236 }
3237 unlock(&sched.lock)
3238 releasem(mp)
3239 return
3240 }
3241
3242
3243
3244
3245 unlock(&sched.lock)
3246
3247 startm(pp, true, false)
3248
3249 releasem(mp)
3250 }
3251
3252
3253
3254 func stoplockedm() {
3255 gp := getg()
3256
3257 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3258 throw("stoplockedm: inconsistent locking")
3259 }
3260 if gp.m.p != 0 {
3261
3262 pp := releasep()
3263 handoffp(pp)
3264 }
3265 incidlelocked(1)
3266
3267 mPark()
3268 status := readgstatus(gp.m.lockedg.ptr())
3269 if status&^_Gscan != _Grunnable {
3270 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3271 dumpgstatus(gp.m.lockedg.ptr())
3272 throw("stoplockedm: not runnable")
3273 }
3274 acquirep(gp.m.nextp.ptr())
3275 gp.m.nextp = 0
3276 }
3277
3278
3279
3280
3281
3282 func startlockedm(gp *g) {
3283 mp := gp.lockedm.ptr()
3284 if mp == getg().m {
3285 throw("startlockedm: locked to me")
3286 }
3287 if mp.nextp != 0 {
3288 throw("startlockedm: m has p")
3289 }
3290
3291 incidlelocked(-1)
3292 pp := releasep()
3293 mp.nextp.set(pp)
3294 notewakeup(&mp.park)
3295 stopm()
3296 }
3297
3298
3299
3300 func gcstopm() {
3301 gp := getg()
3302
3303 if !sched.gcwaiting.Load() {
3304 throw("gcstopm: not waiting for gc")
3305 }
3306 if gp.m.spinning {
3307 gp.m.spinning = false
3308
3309
3310 if sched.nmspinning.Add(-1) < 0 {
3311 throw("gcstopm: negative nmspinning")
3312 }
3313 }
3314 pp := releasep()
3315 lock(&sched.lock)
3316 pp.status = _Pgcstop
3317 pp.gcStopTime = nanotime()
3318 sched.stopwait--
3319 if sched.stopwait == 0 {
3320 notewakeup(&sched.stopnote)
3321 }
3322 unlock(&sched.lock)
3323 stopm()
3324 }
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
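// execute schedules gp to run on the current M. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts a
// new slice. It never returns: control transfers into gp via gogo.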
3335 func execute(gp *g, inheritTime bool) {
3336 mp := getg().m
3337
3338 if goroutineProfile.active {
3339
3340
3341
3342 tryRecordGoroutineProfile(gp, nil, osyield)
3343 }
3344
3345
3346 mp.curg = gp
3347 gp.m = mp
3348 gp.syncSafePoint = false
3349 casgstatus(gp, _Grunnable, _Grunning)
3350 gp.waitsince = 0
3351 gp.preempt = false
3352 gp.stackguard0 = gp.stack.lo + stackGuard
3353 if !inheritTime {
3354 mp.p.ptr().schedtick++
3355 }
3356
3357 if sys.DITSupported && debug.dataindependenttiming != 1 {
3358 if gp.ditWanted && !mp.ditEnabled {
3359
3360
3361 sys.EnableDIT()
3362 mp.ditEnabled = true
3363 } else if !gp.ditWanted && mp.ditEnabled {
3364
3365
3366
3367
3368
3369 sys.DisableDIT()
3370 mp.ditEnabled = false
3371 }
3372 }
3373
3374
3375 hz := sched.profilehz
3376 if mp.profilehz != hz {
3377 setThreadCPUProfiler(hz)
3378 }
3379
3380 trace := traceAcquire()
3381 if trace.ok() {
3382 trace.GoStart()
3383 traceRelease(trace)
3384 }
3385
3386 gogo(&gp.sched)
3387 }
3388
3389
3390
3391
3392
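// findRunnable finds a runnable goroutine to execute, checking the local run
// queue, the global run queue, netpoll, timers, and finally stealing from
// other Ps. It blocks until work is available. tryWakeP reports that the
// returned goroutine is special (GC worker or trace reader), so the caller
// should consider waking a P.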
3393 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3394 mp := getg().m
3395
3396
3397
3398
3399
3400 top:
3401
3402
3403
3404 mp.clearAllpSnapshot()
3405
3406 pp := mp.p.ptr()
3407 if sched.gcwaiting.Load() {
3408 gcstopm()
3409 goto top
3410 }
3411 if pp.runSafePointFn != 0 {
3412 runSafePointFn()
3413 }
3414
3415
3416
3417
3418
3419 now, pollUntil, _ := pp.timers.check(0, nil)
3420
3421
3422 if traceEnabled() || traceShuttingDown() {
3423 gp := traceReader()
3424 if gp != nil {
3425 trace := traceAcquire()
3426 casgstatus(gp, _Gwaiting, _Grunnable)
3427 if trace.ok() {
3428 trace.GoUnpark(gp, 0)
3429 traceRelease(trace)
3430 }
3431 return gp, false, true
3432 }
3433 }
3434
3435
3436 if gcBlackenEnabled != 0 {
3437 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3438 if gp != nil {
3439 return gp, false, true
3440 }
3441 now = tnow
3442 }
3443
3444
3445
3446
3447 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3448 lock(&sched.lock)
3449 gp := globrunqget()
3450 unlock(&sched.lock)
3451 if gp != nil {
3452 return gp, false, false
3453 }
3454 }
3455
3456
3457 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3458 if gp := wakefing(); gp != nil {
3459 ready(gp, 0, true)
3460 }
3461 }
3462
3463
3464 if gcCleanups.needsWake() {
3465 gcCleanups.wake()
3466 }
3467
3468 if *cgo_yield != nil {
3469 asmcgocall(*cgo_yield, nil)
3470 }
3471
3472
3473 if gp, inheritTime := runqget(pp); gp != nil {
3474 return gp, inheritTime, false
3475 }
3476
3477
3478 if !sched.runq.empty() {
3479 lock(&sched.lock)
3480 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3481 unlock(&sched.lock)
3482 if gp != nil {
3483 if runqputbatch(pp, &q); !q.empty() {
3484 throw("Couldn't put Gs into empty local runq")
3485 }
3486 return gp, false, false
3487 }
3488 }
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3500 list, delta := netpoll(0)
3501 sched.pollingNet.Store(0)
3502 if !list.empty() {
3503 gp := list.pop()
3504 injectglist(&list)
3505 netpollAdjustWaiters(delta)
3506 trace := traceAcquire()
3507 casgstatus(gp, _Gwaiting, _Grunnable)
3508 if trace.ok() {
3509 trace.GoUnpark(gp, 0)
3510 traceRelease(trace)
3511 }
3512 return gp, false, false
3513 }
3514 }
3515
3516
3517
3518
3519
3520
3521 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3522 if !mp.spinning {
3523 mp.becomeSpinning()
3524 }
3525
3526 gp, inheritTime, tnow, w, newWork := stealWork(now)
3527 if gp != nil {
3528
3529 return gp, inheritTime, false
3530 }
3531 if newWork {
3532
3533
3534 goto top
3535 }
3536
3537 now = tnow
3538 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3539
3540 pollUntil = w
3541 }
3542 }
3543
3544
3545
3546
3547
3548 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3549 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3550 if node != nil {
3551 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3552 gp := node.gp.ptr()
3553
3554 trace := traceAcquire()
3555 casgstatus(gp, _Gwaiting, _Grunnable)
3556 if trace.ok() {
3557 trace.GoUnpark(gp, 0)
3558 traceRelease(trace)
3559 }
3560 return gp, false, false
3561 }
3562 gcController.removeIdleMarkWorker()
3563 }
3564
3565
3566
3567
3568
3569 gp, otherReady := beforeIdle(now, pollUntil)
3570 if gp != nil {
3571 trace := traceAcquire()
3572 casgstatus(gp, _Gwaiting, _Grunnable)
3573 if trace.ok() {
3574 trace.GoUnpark(gp, 0)
3575 traceRelease(trace)
3576 }
3577 return gp, false, false
3578 }
3579 if otherReady {
3580 goto top
3581 }
3582
3583
3584
3585
3586
3587
3588
3589
3590
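// About to drop the P. Take snapshots of allp and of the idle and timer masks
// first: once the P is released these can change underneath us, and the
// no-P re-checks below must run against a consistent view.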
3591 allpSnapshot := mp.snapshotAllp()
3592
3593
3594 idlepMaskSnapshot := idlepMask
3595 timerpMaskSnapshot := timerpMask
3596
3597
3598 lock(&sched.lock)
3599 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3600 unlock(&sched.lock)
3601 goto top
3602 }
3603 if !sched.runq.empty() {
3604 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3605 unlock(&sched.lock)
3606 if gp == nil {
3607 throw("global runq empty with non-zero runqsize")
3608 }
3609 if runqputbatch(pp, &q); !q.empty() {
3610 throw("Couldn't put Gs into empty local runq")
3611 }
3612 return gp, false, false
3613 }
3614 if !mp.spinning && sched.needspinning.Load() == 1 {
3615
3616 mp.becomeSpinning()
3617 unlock(&sched.lock)
3618 goto top
3619 }
3620 if releasep() != pp {
3621 throw("findRunnable: wrong p")
3622 }
3623 now = pidleput(pp, now)
3624 unlock(&sched.lock)
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
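// Delicate dance before parking. If this M was spinning, drop the spinning
// count first and then re-check every source of work (global queue, other Ps'
// run queues, idle GC work, timers) one more time. Work submitters publish
// work to a queue before checking nmspinning (in wakep), and a parking M
// decrements nmspinning before re-checking the queues, so one side always
// notices the other and wakeups are not lost.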
3662 wasSpinning := mp.spinning
3663 if mp.spinning {
3664 mp.spinning = false
3665 if sched.nmspinning.Add(-1) < 0 {
3666 throw("findRunnable: negative nmspinning")
3667 }
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680 lock(&sched.lock)
3681 if !sched.runq.empty() {
3682 pp, _ := pidlegetSpinning(0)
3683 if pp != nil {
3684 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3685 unlock(&sched.lock)
3686 if gp == nil {
3687 throw("global runq empty with non-zero runqsize")
3688 }
3689 if runqputbatch(pp, &q); !q.empty() {
3690 throw("Couldn't put Gs into empty local runq")
3691 }
3692 acquirep(pp)
3693 mp.becomeSpinning()
3694 return gp, false, false
3695 }
3696 }
3697 unlock(&sched.lock)
3698
3699 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3700 if pp != nil {
3701 acquirep(pp)
3702 mp.becomeSpinning()
3703 goto top
3704 }
3705
3706
3707 pp, gp := checkIdleGCNoP()
3708 if pp != nil {
3709 acquirep(pp)
3710 mp.becomeSpinning()
3711
3712
3713 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3714 trace := traceAcquire()
3715 casgstatus(gp, _Gwaiting, _Grunnable)
3716 if trace.ok() {
3717 trace.GoUnpark(gp, 0)
3718 traceRelease(trace)
3719 }
3720 return gp, false, false
3721 }
3722
3723
3724
3725
3726
3727
3728
3729 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3730 }
3731
3732
3733
3734
3735
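// Block in the network poller until the next timer (pollUntil) or until woken
// by netpollBreak. Only one M at a time owns the blocking poll; ownership is
// claimed by swapping sched.lastpoll to 0.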
3736 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3737 sched.pollUntil.Store(pollUntil)
3738 if mp.p != 0 {
3739 throw("findRunnable: netpoll with p")
3740 }
3741 if mp.spinning {
3742 throw("findRunnable: netpoll with spinning")
3743 }
3744 delay := int64(-1)
3745 if pollUntil != 0 {
3746 if now == 0 {
3747 now = nanotime()
3748 }
3749 delay = pollUntil - now
3750 if delay < 0 {
3751 delay = 0
3752 }
3753 }
3754 if faketime != 0 {
3755
3756 delay = 0
3757 }
3758 list, delta := netpoll(delay)
3759
3760 now = nanotime()
3761 sched.pollUntil.Store(0)
3762 sched.lastpoll.Store(now)
3763 if faketime != 0 && list.empty() {
3764
3765
3766 stopm()
3767 goto top
3768 }
3769 lock(&sched.lock)
3770 pp, _ := pidleget(now)
3771 unlock(&sched.lock)
3772 if pp == nil {
3773 injectglist(&list)
3774 netpollAdjustWaiters(delta)
3775 } else {
3776 acquirep(pp)
3777 if !list.empty() {
3778 gp := list.pop()
3779 injectglist(&list)
3780 netpollAdjustWaiters(delta)
3781 trace := traceAcquire()
3782 casgstatus(gp, _Gwaiting, _Grunnable)
3783 if trace.ok() {
3784 trace.GoUnpark(gp, 0)
3785 traceRelease(trace)
3786 }
3787 return gp, false, false
3788 }
3789 if wasSpinning {
3790 mp.becomeSpinning()
3791 }
3792 goto top
3793 }
3794 } else if pollUntil != 0 && netpollinited() {
3795 pollerPollUntil := sched.pollUntil.Load()
3796 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3797 netpollBreak()
3798 }
3799 }
3800 stopm()
3801 goto top
3802 }
3803
3804
3805
3806
3807
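// pollWork reports whether there is non-background work this P could be
// doing: something on the global or local run queue, or ready network I/O.
// It does not check timers.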
3808 func pollWork() bool {
3809 if !sched.runq.empty() {
3810 return true
3811 }
3812 p := getg().m.p.ptr()
3813 if !runqempty(p) {
3814 return true
3815 }
3816 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3817 if list, delta := netpoll(0); !list.empty() {
3818 injectglist(&list)
3819 netpollAdjustWaiters(delta)
3820 return true
3821 }
3822 }
3823 return false
3824 }
3825
3826
3827
3828
3829
3830
3831
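// stealWork attempts to steal a runnable goroutine or timer from any P.
// It returns either a goroutine to run, or, via newWork, an indication that
// new work may have appeared so the caller should restart its search.
// rnow and pollUntil carry the updated current time and the earliest timer
// observed while checking other Ps' timers.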
3832 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3833 pp := getg().m.p.ptr()
3834
3835 ranTimer := false
3836
3837 const stealTries = 4
3838 for i := 0; i < stealTries; i++ {
3839 stealTimersOrRunNextG := i == stealTries-1
3840
3841 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3842 if sched.gcwaiting.Load() {
3843
3844 return nil, false, now, pollUntil, true
3845 }
3846 p2 := allp[enum.position()]
3847 if pp == p2 {
3848 continue
3849 }
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3865 tnow, w, ran := p2.timers.check(now, nil)
3866 now = tnow
3867 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3868 pollUntil = w
3869 }
3870 if ran {
3871
3872
3873
3874
3875
3876
3877
3878
3879 if gp, inheritTime := runqget(pp); gp != nil {
3880 return gp, inheritTime, now, pollUntil, ranTimer
3881 }
3882 ranTimer = true
3883 }
3884 }
3885
3886
3887 if !idlepMask.read(enum.position()) {
3888 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3889 return gp, false, now, pollUntil, ranTimer
3890 }
3891 }
3892 }
3893 }
3894
3895
3896
3897
3898 return nil, false, now, pollUntil, ranTimer
3899 }
3900
3901
3902
3903
3904
3905
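// checkRunqsNoP re-checks all Ps' run queues after this M has released its P.
// If some non-idle P has work, it tries to grab an idle P to run it on and
// returns that P; the caller re-acquires it and retries the full search.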
3906 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3907 for id, p2 := range allpSnapshot {
3908 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3909 lock(&sched.lock)
3910 pp, _ := pidlegetSpinning(0)
3911 if pp == nil {
3912
3913 unlock(&sched.lock)
3914 return nil
3915 }
3916 unlock(&sched.lock)
3917 return pp
3918 }
3919 }
3920
3921
3922 return nil
3923 }
3924
3925
3926
3927
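// checkTimersNoP scans all Ps that may own timers and returns the earliest
// wake time, so the blocking netpoll below does not sleep past a timer held
// by some other P.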
3928 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3929 for id, p2 := range allpSnapshot {
3930 if timerpMaskSnapshot.read(uint32(id)) {
3931 w := p2.timers.wakeTime()
3932 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3933 pollUntil = w
3934 }
3935 }
3936 }
3937
3938 return pollUntil
3939 }
3940
3941
3942
3943
3944
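// checkIdleGCNoP checks whether an idle-priority GC mark worker should run
// even though this M has no P. If so, it acquires an idle P and a worker
// goroutine and returns both; otherwise it returns nil, nil.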
3945 func checkIdleGCNoP() (*p, *g) {
3946
3947
3948
3949
3950
3951
3952 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3953 return nil, nil
3954 }
3955 if !gcShouldScheduleWorker(nil) {
3956 return nil, nil
3957 }
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976 lock(&sched.lock)
3977 pp, now := pidlegetSpinning(0)
3978 if pp == nil {
3979 unlock(&sched.lock)
3980 return nil, nil
3981 }
3982
3983
3984 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3985 pidleput(pp, now)
3986 unlock(&sched.lock)
3987 return nil, nil
3988 }
3989
3990 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3991 if node == nil {
3992 pidleput(pp, now)
3993 unlock(&sched.lock)
3994 gcController.removeIdleMarkWorker()
3995 return nil, nil
3996 }
3997
3998 unlock(&sched.lock)
3999
4000 return pp, node.gp.ptr()
4001 }
4002
4003
4004
4005
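// wakeNetPoller wakes the thread sleeping in the network poller if it is not
// going to wake up before when; otherwise it wakes an idle P so that timers
// and the network poller get serviced.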
4006 func wakeNetPoller(when int64) {
4007 if sched.lastpoll.Load() == 0 {
4008
4009
4010
4011
4012 pollerPollUntil := sched.pollUntil.Load()
4013 if pollerPollUntil == 0 || pollerPollUntil > when {
4014 netpollBreak()
4015 }
4016 } else {
4017
4018
4019 if GOOS != "plan9" {
4020 wakep()
4021 }
4022 }
4023 }
4024
4025 func resetspinning() {
4026 gp := getg()
4027 if !gp.m.spinning {
4028 throw("resetspinning: not a spinning m")
4029 }
4030 gp.m.spinning = false
4031 nmspinning := sched.nmspinning.Add(-1)
4032 if nmspinning < 0 {
4033 throw("findRunnable: negative nmspinning")
4034 }
4035
4036
4037
4038 wakep()
4039 }
4040
4041
4042
4043
4044
4045
4046
4047
4048
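// injectglist marks every goroutine on glist runnable, adds them to run
// queues, and clears glist. If there are idle Ps, goroutines are pushed onto
// the global queue and idle Ms are started to run them; the remainder go onto
// the current P's local queue. With no current P, everything goes global.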
4049 func injectglist(glist *gList) {
4050 if glist.empty() {
4051 return
4052 }
4053
4054
4055
4056 var tail *g
4057 trace := traceAcquire()
4058 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4059 tail = gp
4060 casgstatus(gp, _Gwaiting, _Grunnable)
4061 if trace.ok() {
4062 trace.GoUnpark(gp, 0)
4063 }
4064 }
4065 if trace.ok() {
4066 traceRelease(trace)
4067 }
4068
4069
4070 q := gQueue{glist.head, tail.guintptr(), glist.size}
4071 *glist = gList{}
4072
4073 startIdle := func(n int32) {
4074 for ; n > 0; n-- {
4075 mp := acquirem()
4076 lock(&sched.lock)
4077
4078 pp, _ := pidlegetSpinning(0)
4079 if pp == nil {
4080 unlock(&sched.lock)
4081 releasem(mp)
4082 break
4083 }
4084
4085 startm(pp, false, true)
4086 unlock(&sched.lock)
4087 releasem(mp)
4088 }
4089 }
4090
4091 pp := getg().m.p.ptr()
4092 if pp == nil {
4093 n := q.size
4094 lock(&sched.lock)
4095 globrunqputbatch(&q)
4096 unlock(&sched.lock)
4097 startIdle(n)
4098 return
4099 }
4100
4101 var globq gQueue
4102 npidle := sched.npidle.Load()
4103 for ; npidle > 0 && !q.empty(); npidle-- {
4104 g := q.pop()
4105 globq.pushBack(g)
4106 }
4107 if !globq.empty() {
4108 n := globq.size
4109 lock(&sched.lock)
4110 globrunqputbatch(&globq)
4111 unlock(&sched.lock)
4112 startIdle(n)
4113 }
4114
4115 if runqputbatch(pp, &q); !q.empty() {
4116 lock(&sched.lock)
4117 globrunqputbatch(&q)
4118 unlock(&sched.lock)
4119 }
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134 wakep()
4135 }
4136
4137
4138
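// schedule performs one round of scheduling: find a runnable goroutine and
// execute it. It never returns.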
4139 func schedule() {
4140 mp := getg().m
4141
4142 if mp.locks != 0 {
4143 throw("schedule: holding locks")
4144 }
4145
4146 if mp.lockedg != 0 {
4147 stoplockedm()
4148 execute(mp.lockedg.ptr(), false)
4149 }
4150
4151
4152
4153 if mp.incgo {
4154 throw("schedule: in cgo")
4155 }
4156
4157 top:
4158 pp := mp.p.ptr()
4159 pp.preempt = false
4160
4161
4162
4163
4164 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4165 throw("schedule: spinning with local work")
4166 }
4167
4168 gp, inheritTime, tryWakeP := findRunnable()
4169
4170
4171 pp = mp.p.ptr()
4172
4173
4174
4175
4176 mp.clearAllpSnapshot()
4177
4178
4179
4180
4181
4182
4183
4184
4185 gcController.releaseNextGCMarkWorker(pp)
4186
4187 if debug.dontfreezetheworld > 0 && freezing.Load() {
4188
4189
4190
4191
4192
4193
4194
4195 lock(&deadlock)
4196 lock(&deadlock)
4197 }
4198
4199
4200
4201
4202 if mp.spinning {
4203 resetspinning()
4204 }
4205
4206 if sched.disable.user && !schedEnabled(gp) {
4207
4208
4209
4210 lock(&sched.lock)
4211 if schedEnabled(gp) {
4212
4213
4214 unlock(&sched.lock)
4215 } else {
4216 sched.disable.runnable.pushBack(gp)
4217 unlock(&sched.lock)
4218 goto top
4219 }
4220 }
4221
4222
4223
4224 if tryWakeP {
4225 wakep()
4226 }
4227 if gp.lockedm != 0 {
4228
4229
4230 startlockedm(gp)
4231 goto top
4232 }
4233
4234 execute(gp, inheritTime)
4235 }
4236
4237
4238
4239
4240
4241
4242
4243
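// dropg removes the association between the current M and its current
// goroutine (m.curg). Callers typically move the goroutine out of _Grunning
// first and then arrange for it to be restarted elsewhere.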
4244 func dropg() {
4245 gp := getg()
4246
4247 setMNoWB(&gp.m.curg.m, nil)
4248 setGNoWB(&gp.m.curg, nil)
4249 }
4250
4251 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4252 unlock((*mutex)(lock))
4253 return true
4254 }
4255
4256
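// park_m runs on g0 to park the goroutine gp that called gopark: it moves gp
// to _Gwaiting, runs the unlock function if one was supplied and, if that
// function vetoes the park, makes gp runnable again and resumes it
// immediately; otherwise it calls schedule to find something else to run.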
4257 func park_m(gp *g) {
4258 mp := getg().m
4259
4260 trace := traceAcquire()
4261
4262
4263
4264
4265
4266 bubble := gp.bubble
4267 if bubble != nil {
4268 bubble.incActive()
4269 }
4270
4271 if trace.ok() {
4272
4273
4274
4275 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4276 }
4277
4278
4279 casgstatus(gp, _Grunning, _Gwaiting)
4280 if trace.ok() {
4281 traceRelease(trace)
4282 }
4283
4284 dropg()
4285
4286 if fn := mp.waitunlockf; fn != nil {
4287 ok := fn(gp, mp.waitlock)
4288 mp.waitunlockf = nil
4289 mp.waitlock = nil
4290 if !ok {
4291 trace := traceAcquire()
4292 casgstatus(gp, _Gwaiting, _Grunnable)
4293 if bubble != nil {
4294 bubble.decActive()
4295 }
4296 if trace.ok() {
4297 trace.GoUnpark(gp, 2)
4298 traceRelease(trace)
4299 }
4300 execute(gp, true)
4301 }
4302 }
4303
4304 if bubble != nil {
4305 bubble.decActive()
4306 }
4307
4308 schedule()
4309 }
4310
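// goschedImpl yields the processor: it moves gp from _Grunning to _Grunnable
// and puts it on the global run queue (or back on the local queue when this
// is a preemption while the world is being stopped), then schedules.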
4311 func goschedImpl(gp *g, preempted bool) {
4312 pp := gp.m.p.ptr()
4313 trace := traceAcquire()
4314 status := readgstatus(gp)
4315 if status&^_Gscan != _Grunning {
4316 dumpgstatus(gp)
4317 throw("bad g status")
4318 }
4319 if trace.ok() {
4320
4321
4322
4323 if preempted {
4324 trace.GoPreempt()
4325 } else {
4326 trace.GoSched()
4327 }
4328 }
4329 casgstatus(gp, _Grunning, _Grunnable)
4330 if trace.ok() {
4331 traceRelease(trace)
4332 }
4333
4334 dropg()
4335 if preempted && sched.gcwaiting.Load() {
4336
4337
4338 runqput(pp, gp, true)
4339 } else {
4340 lock(&sched.lock)
4341 globrunqput(gp)
4342 unlock(&sched.lock)
4343 }
4344
4345 if mainStarted {
4346 wakep()
4347 }
4348
4349 schedule()
4350 }
4351
4352
4353 func gosched_m(gp *g) {
4354 goschedImpl(gp, false)
4355 }
4356
4357
4358 func goschedguarded_m(gp *g) {
4359 if !canPreemptM(gp.m) {
4360 gogo(&gp.sched)
4361 }
4362 goschedImpl(gp, false)
4363 }
4364
4365 func gopreempt_m(gp *g) {
4366 goschedImpl(gp, true)
4367 }
4368
4369
4370
4371
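// preemptPark parks gp, which was asynchronously preempted at a safe point,
// transitions it to _Gpreempted, and schedules something else on this M.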
4372 func preemptPark(gp *g) {
4373 status := readgstatus(gp)
4374 if status&^_Gscan != _Grunning {
4375 dumpgstatus(gp)
4376 throw("bad g status")
4377 }
4378
4379 if gp.asyncSafePoint {
4380
4381
4382
4383 f := findfunc(gp.sched.pc)
4384 if !f.valid() {
4385 throw("preempt at unknown pc")
4386 }
4387 if f.flag&abi.FuncFlagSPWrite != 0 {
4388 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4389 throw("preempt SPWRITE")
4390 }
4391 }
4392
4393
4394
4395
4396
4397
4398
4399 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421 trace := traceAcquire()
4422 if trace.ok() {
4423 trace.GoPark(traceBlockPreempted, 0)
4424 }
4425
4426
4427
4428
4429 dropg()
4430
4431
4432 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4433 if trace.ok() {
4434 traceRelease(trace)
4435 }
4436
4437
4438 schedule()
4439 }
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
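// goyield is like Gosched, but it emits a "preempted" trace event instead of
// a "sched" one and puts the current goroutine on the current P's local run
// queue instead of the global run queue.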
4455 func goyield() {
4456 checkTimeouts()
4457 mcall(goyield_m)
4458 }
4459
4460 func goyield_m(gp *g) {
4461 trace := traceAcquire()
4462 pp := gp.m.p.ptr()
4463 if trace.ok() {
4464
4465
4466
4467 trace.GoPreempt()
4468 }
4469 casgstatus(gp, _Grunning, _Grunnable)
4470 if trace.ok() {
4471 traceRelease(trace)
4472 }
4473 dropg()
4474 runqput(pp, gp, false)
4475 schedule()
4476 }
4477
4478
4479 func goexit1() {
4480 if raceenabled {
4481 if gp := getg(); gp.bubble != nil {
4482 racereleasemergeg(gp, gp.bubble.raceaddr())
4483 }
4484 racegoend()
4485 }
4486 trace := traceAcquire()
4487 if trace.ok() {
4488 trace.GoEnd()
4489 traceRelease(trace)
4490 }
4491 mcall(goexit0)
4492 }
4493
4494
4495 func goexit0(gp *g) {
4496 if goexperiment.RuntimeSecret && gp.secret > 0 {
4497
4498
4499 memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4500
4501
4502 }
4503 gdestroy(gp)
4504 schedule()
4505 }
4506
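// gdestroy tears down a goroutine that has finished running: it moves gp to
// _Gdead, clears its scheduling state, and puts it on the P's free list for
// reuse. If the goroutine was locked to its OS thread, control returns to g0
// so the thread can be released rather than reused directly (except on plan9,
// where the thread is simply unlocked).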
4507 func gdestroy(gp *g) {
4508 mp := getg().m
4509 pp := mp.p.ptr()
4510
4511 casgstatus(gp, _Grunning, _Gdead)
4512 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4513 if isSystemGoroutine(gp, false) {
4514 sched.ngsys.Add(-1)
4515 }
4516 gp.m = nil
4517 locked := gp.lockedm != 0
4518 gp.lockedm = 0
4519 mp.lockedg = 0
4520 gp.preemptStop = false
4521 gp.paniconfault = false
4522 gp._defer = nil
4523 gp._panic = nil
4524 gp.writebuf = nil
4525 gp.waitreason = waitReasonZero
4526 gp.param = nil
4527 gp.labels = nil
4528 gp.timer = nil
4529 gp.bubble = nil
4530 gp.fipsOnlyBypass = false
4531 gp.secret = 0
4532
4533 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4534
4535
4536
4537 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4538 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4539 gcController.bgScanCredit.Add(scanCredit)
4540 gp.gcAssistBytes = 0
4541 }
4542
4543 dropg()
4544
4545 if GOARCH == "wasm" {
4546 gfput(pp, gp)
4547 return
4548 }
4549
4550 if locked && mp.lockedInt != 0 {
4551 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4552 if mp.isextra {
4553 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4554 }
4555 throw("exited a goroutine internally locked to the OS thread")
4556 }
4557 gfput(pp, gp)
4558 if locked {
4559
4560
4561
4562
4563
4564
4565 if GOOS != "plan9" {
4566 gogo(&mp.g0.sched)
4567 } else {
4568
4569
4570 mp.lockedExt = 0
4571 }
4572 }
4573 }
4574
4575
4576
4577
4578
4579
4580
4581
4582
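// save records pc, sp and bp in gp.sched so that a later gogo will resume
// execution at this point. It must not be called on g0 or gsignal; the code
// throws if it is.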
4583 func save(pc, sp, bp uintptr) {
4584 gp := getg()
4585
4586 if gp == gp.m.g0 || gp == gp.m.gsignal {
4587
4588
4589
4590
4591
4592 throw("save on system g not allowed")
4593 }
4594
4595 gp.sched.pc = pc
4596 gp.sched.sp = sp
4597 gp.sched.lr = 0
4598 gp.sched.bp = bp
4599
4600
4601
4602 if gp.sched.ctxt != nil {
4603 badctxt()
4604 }
4605 }
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
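// reentersyscall is the implementation of entersyscall: it records the
// caller's pc/sp/bp, marks the goroutine _Gsyscall, and cooperates with the
// GC and sysmon, which may take the P away if the syscall runs long. save is
// deliberately re-run after each step below that could clobber the saved
// scheduling state.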
4631 func reentersyscall(pc, sp, bp uintptr) {
4632 gp := getg()
4633
4634
4635
4636 gp.m.locks++
4637
4638
4639
4640
4641
4642 gp.stackguard0 = stackPreempt
4643 gp.throwsplit = true
4644
4645
4646 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4647
4648 pp := gp.m.p.ptr()
4649 if pp.runSafePointFn != 0 {
4650
4651 systemstack(runSafePointFn)
4652 }
4653 gp.m.oldp.set(pp)
4654
4655
4656 save(pc, sp, bp)
4657 gp.syscallsp = sp
4658 gp.syscallpc = pc
4659 gp.syscallbp = bp
4660
4661
4662 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4663 systemstack(func() {
4664 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4665 throw("entersyscall")
4666 })
4667 }
4668 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4669 systemstack(func() {
4670 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4671 throw("entersyscall")
4672 })
4673 }
4674 trace := traceAcquire()
4675 if trace.ok() {
4676
4677
4678
4679
4680 systemstack(func() {
4681 trace.GoSysCall()
4682 })
4683
4684 save(pc, sp, bp)
4685 }
4686 if sched.gcwaiting.Load() {
4687
4688
4689
4690 systemstack(func() {
4691 entersyscallHandleGCWait(trace)
4692 })
4693
4694 save(pc, sp, bp)
4695 }
4696
4697
4698
4699
4700
4701 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4702 casgstatus(gp, _Grunning, _Gsyscall)
4703 }
4704 if staticLockRanking {
4705
4706 save(pc, sp, bp)
4707 }
4708 if trace.ok() {
4709
4710
4711
4712 traceRelease(trace)
4713 }
4714 if sched.sysmonwait.Load() {
4715 systemstack(entersyscallWakeSysmon)
4716
4717 save(pc, sp, bp)
4718 }
4719 gp.m.locks--
4720 }
4721
4722
4723
4724
4725 const debugExtendGrunningNoP = false
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
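// entersyscall is the standard syscall entry used by the syscall package and
// by cgocall for calls that are expected to return quickly; the matching
// exitsyscall must be called when the call returns. A rough pairing sketch
// (not part of this file; rawSyscall is a hypothetical raw call):
//
//	entersyscall()
//	r1, errno := rawSyscall(trap, a1, a2, a3)
//	exitsyscall()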
4741 func entersyscall() {
4742
4743
4744
4745
4746 fp := getcallerfp()
4747 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4748 }
4749
4750 func entersyscallWakeSysmon() {
4751 lock(&sched.lock)
4752 if sched.sysmonwait.Load() {
4753 sched.sysmonwait.Store(false)
4754 notewakeup(&sched.sysmonnote)
4755 }
4756 unlock(&sched.lock)
4757 }
4758
4759 func entersyscallHandleGCWait(trace traceLocker) {
4760 gp := getg()
4761
4762 lock(&sched.lock)
4763 if sched.stopwait > 0 {
4764
4765 pp := gp.m.p.ptr()
4766 pp.m = 0
4767 gp.m.p = 0
4768 atomic.Store(&pp.status, _Pgcstop)
4769
4770 if trace.ok() {
4771 trace.ProcStop(pp)
4772 }
4773 addGSyscallNoP(gp.m)
4774 pp.gcStopTime = nanotime()
4775 pp.syscalltick++
4776 if sched.stopwait--; sched.stopwait == 0 {
4777 notewakeup(&sched.stopnote)
4778 }
4779 }
4780 unlock(&sched.lock)
4781 }
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
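// entersyscallblock is like entersyscall, but for calls that are known to
// block for a long time: instead of keeping the P, it hands the P off to
// another M immediately.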
4795 func entersyscallblock() {
4796 gp := getg()
4797
4798 gp.m.locks++
4799 gp.throwsplit = true
4800 gp.stackguard0 = stackPreempt
4801 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4802 gp.m.p.ptr().syscalltick++
4803
4804 addGSyscallNoP(gp.m)
4805
4806
4807 pc := sys.GetCallerPC()
4808 sp := sys.GetCallerSP()
4809 bp := getcallerfp()
4810 save(pc, sp, bp)
4811 gp.syscallsp = gp.sched.sp
4812 gp.syscallpc = gp.sched.pc
4813 gp.syscallbp = gp.sched.bp
4814 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4815 sp1 := sp
4816 sp2 := gp.sched.sp
4817 sp3 := gp.syscallsp
4818 systemstack(func() {
4819 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4820 throw("entersyscallblock")
4821 })
4822 }
4823
4824
4825
4826
4827
4828
4829 trace := traceAcquire()
4830 systemstack(func() {
4831 if trace.ok() {
4832 trace.GoSysCall()
4833 }
4834 handoffp(releasep())
4835 })
4836
4837
4838
4839 if debugExtendGrunningNoP {
4840 usleep(10)
4841 }
4842 casgstatus(gp, _Grunning, _Gsyscall)
4843 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4844 systemstack(func() {
4845 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4846 throw("entersyscallblock")
4847 })
4848 }
4849 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4850 systemstack(func() {
4851 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4852 throw("entersyscallblock")
4853 })
4854 }
4855 if trace.ok() {
4856 systemstack(func() {
4857 traceRelease(trace)
4858 })
4859 }
4860
4861
4862 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4863
4864 gp.m.locks--
4865 }
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
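// exitsyscall is called when a system call returns. The goroutine tries to
// re-associate with a P, preferably the one it had before the call; if none
// is available it is queued as runnable and the M parks in exitsyscallNoP.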
4887 func exitsyscall() {
4888 gp := getg()
4889
4890 gp.m.locks++
4891 if sys.GetCallerSP() > gp.syscallsp {
4892 throw("exitsyscall: syscall frame is no longer valid")
4893 }
4894 gp.waitsince = 0
4895
4896 if sched.stopwait == freezeStopWait {
4897
4898
4899
4900 systemstack(func() {
4901 lock(&deadlock)
4902 lock(&deadlock)
4903 })
4904 }
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4918 casgstatus(gp, _Gsyscall, _Grunning)
4919 }
4920
4921
4922
4923
4924 if debugExtendGrunningNoP {
4925 usleep(10)
4926 }
4927
4928
4929 oldp := gp.m.oldp.ptr()
4930 gp.m.oldp.set(nil)
4931
4932
4933 pp := gp.m.p.ptr()
4934 if pp != nil {
4935
4936 if trace := traceAcquire(); trace.ok() {
4937 systemstack(func() {
4938
4939
4940
4941
4942
4943
4944
4945
4946 if pp.syscalltick == gp.m.syscalltick {
4947 trace.GoSysExit(false)
4948 } else {
4949
4950
4951
4952
4953 trace.ProcSteal(pp)
4954 trace.ProcStart()
4955 trace.GoSysExit(true)
4956 trace.GoStart()
4957 }
4958 traceRelease(trace)
4959 })
4960 }
4961 } else {
4962
4963 systemstack(func() {
4964
4965 if pp := exitsyscallTryGetP(oldp); pp != nil {
4966
4967 acquirepNoTrace(pp)
4968
4969
4970 if trace := traceAcquire(); trace.ok() {
4971 trace.ProcStart()
4972 trace.GoSysExit(true)
4973 trace.GoStart()
4974 traceRelease(trace)
4975 }
4976 }
4977 })
4978 pp = gp.m.p.ptr()
4979 }
4980
4981
4982 if pp != nil {
4983 if goroutineProfile.active {
4984
4985
4986
4987 systemstack(func() {
4988 tryRecordGoroutineProfileWB(gp)
4989 })
4990 }
4991
4992
4993 pp.syscalltick++
4994
4995
4996
4997 gp.syscallsp = 0
4998 gp.m.locks--
4999 if gp.preempt {
5000
5001 gp.stackguard0 = stackPreempt
5002 } else {
5003
5004 gp.stackguard0 = gp.stack.lo + stackGuard
5005 }
5006 gp.throwsplit = false
5007
5008 if sched.disable.user && !schedEnabled(gp) {
5009
5010 Gosched()
5011 }
5012 return
5013 }
5014
5015 gp.m.locks--
5016
5017
5018 mcall(exitsyscallNoP)
5019
5020
5021
5022
5023
5024
5025
5026 gp.syscallsp = 0
5027 gp.m.p.ptr().syscalltick++
5028 gp.throwsplit = false
5029 }
5030
5031
5032
5033
5034
5035
5036
5037 func exitsyscallTryGetP(oldp *p) *p {
5038
5039 if oldp != nil {
5040 if thread, ok := setBlockOnExitSyscall(oldp); ok {
5041 thread.takeP()
5042 decGSyscallNoP(getg().m)
5043 thread.resume()
5044 return oldp
5045 }
5046 }
5047
5048
5049 if sched.pidle != 0 {
5050 lock(&sched.lock)
5051 pp, _ := pidleget(0)
5052 if pp != nil && sched.sysmonwait.Load() {
5053 sched.sysmonwait.Store(false)
5054 notewakeup(&sched.sysmonnote)
5055 }
5056 unlock(&sched.lock)
5057 if pp != nil {
5058 decGSyscallNoP(getg().m)
5059 return pp
5060 }
5061 }
5062 return nil
5063 }
5064
5065
5066
5067
5068
5069
5070
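// exitsyscallNoP is the slow path of exitsyscall, run via mcall when no P
// could be acquired directly: it marks the goroutine runnable, puts it on the
// global queue (or runs it at once if an idle P turns up), and parks the M.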
5071 func exitsyscallNoP(gp *g) {
5072 traceExitingSyscall()
5073 trace := traceAcquire()
5074 casgstatus(gp, _Grunning, _Grunnable)
5075 traceExitedSyscall()
5076 if trace.ok() {
5077
5078
5079
5080
5081 trace.GoSysExit(true)
5082 traceRelease(trace)
5083 }
5084 decGSyscallNoP(getg().m)
5085 dropg()
5086 lock(&sched.lock)
5087 var pp *p
5088 if schedEnabled(gp) {
5089 pp, _ = pidleget(0)
5090 }
5091 var locked bool
5092 if pp == nil {
5093 globrunqput(gp)
5094
5095
5096
5097
5098
5099
5100 locked = gp.lockedm != 0
5101 } else if sched.sysmonwait.Load() {
5102 sched.sysmonwait.Store(false)
5103 notewakeup(&sched.sysmonnote)
5104 }
5105 unlock(&sched.lock)
5106 if pp != nil {
5107 acquirep(pp)
5108 execute(gp, false)
5109 }
5110 if locked {
5111
5112
5113
5114
5115 stoplockedm()
5116 execute(gp, false)
5117 }
5118 stopm()
5119 schedule()
5120 }
5121
5122
5123
5124
5125
5126
5127
5128 func addGSyscallNoP(mp *m) {
5129
5130
5131
5132 if !mp.isExtraInC {
5133
5134
5135
5136
5137
5138 sched.nGsyscallNoP.Add(1)
5139 }
5140 }
5141
5142
5143
5144
5145
5146
5147
5148 func decGSyscallNoP(mp *m) {
5149
5150
5151
5152 if !mp.isExtraInC {
5153 sched.nGsyscallNoP.Add(-1)
5154 }
5155 }
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169 func syscall_runtime_BeforeFork() {
5170 gp := getg().m.curg
5171
5172
5173
5174
5175 gp.m.locks++
5176 sigsave(&gp.m.sigmask)
5177 sigblock(false)
5178
5179
5180
5181
5182
5183 gp.stackguard0 = stackFork
5184 }
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198 func syscall_runtime_AfterFork() {
5199 gp := getg().m.curg
5200
5201
5202 gp.stackguard0 = gp.stack.lo + stackGuard
5203
5204 msigrestore(gp.m.sigmask)
5205
5206 gp.m.locks--
5207 }
5208
5209
5210
5211 var inForkedChild bool
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232 func syscall_runtime_AfterForkInChild() {
5233
5234
5235
5236
5237 inForkedChild = true
5238
5239 clearSignalHandlers()
5240
5241
5242
5243 msigrestore(getg().m.sigmask)
5244
5245 inForkedChild = false
5246 }
5247
5248
5249
5250
5251 var pendingPreemptSignals atomic.Int32
5252
5253
5254
5255
5256 func syscall_runtime_BeforeExec() {
5257
5258 execLock.lock()
5259
5260
5261
5262 if GOOS == "darwin" || GOOS == "ios" {
5263 for pendingPreemptSignals.Load() > 0 {
5264 osyield()
5265 }
5266 }
5267 }
5268
5269
5270
5271
5272 func syscall_runtime_AfterExec() {
5273 execLock.unlock()
5274 }
5275
5276
5277 func malg(stacksize int32) *g {
5278 newg := new(g)
5279 if stacksize >= 0 {
5280 stacksize = round2(stackSystem + stacksize)
5281 systemstack(func() {
5282 newg.stack = stackalloc(uint32(stacksize))
5283 if valgrindenabled {
5284 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5285 }
5286 })
5287 newg.stackguard0 = newg.stack.lo + stackGuard
5288 newg.stackguard1 = ^uintptr(0)
5289
5290
5291 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5292 }
5293 return newg
5294 }
5295
5296
5297
5298
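// newproc creates a new goroutine running fn and puts it on the current P's
// run queue. The compiler turns every go statement into a call to newproc.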
5299 func newproc(fn *funcval) {
5300 gp := getg()
5301 pc := sys.GetCallerPC()
5302 systemstack(func() {
5303 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5304
5305 pp := getg().m.p.ptr()
5306 runqput(pp, newg, true)
5307
5308 if mainStarted {
5309 wakep()
5310 }
5311 })
5312 }
5313
5314
5315
5316
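// newproc1 allocates (or reuses) a g, sets it up to run fn, and returns it in
// state _Grunnable, or _Gwaiting with the given waitreason if parked is true.
// callergp and callerpc record the creator. The caller is responsible for
// scheduling the new goroutine.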
5317 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5318 if fn == nil {
5319 fatal("go of nil func value")
5320 }
5321
5322 mp := acquirem()
5323 pp := mp.p.ptr()
5324 newg := gfget(pp)
5325 if newg == nil {
5326 newg = malg(stackMin)
5327 casgstatus(newg, _Gidle, _Gdead)
5328 allgadd(newg)
5329 }
5330 if newg.stack.hi == 0 {
5331 throw("newproc1: newg missing stack")
5332 }
5333
5334 if readgstatus(newg) != _Gdead {
5335 throw("newproc1: new g is not Gdead")
5336 }
5337
5338 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5339 totalSize = alignUp(totalSize, sys.StackAlign)
5340 sp := newg.stack.hi - totalSize
5341 if usesLR {
5342
5343 *(*uintptr)(unsafe.Pointer(sp)) = 0
5344 prepGoExitFrame(sp)
5345 }
5346 if GOARCH == "arm64" {
5347
5348 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5349 }
5350
5351 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5352 newg.sched.sp = sp
5353 newg.stktopsp = sp
5354 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5355 newg.sched.g = guintptr(unsafe.Pointer(newg))
5356 gostartcallfn(&newg.sched, fn)
5357 newg.parentGoid = callergp.goid
5358 newg.gopc = callerpc
5359 newg.ancestors = saveAncestors(callergp)
5360 newg.startpc = fn.fn
5361 newg.runningCleanups.Store(false)
5362 if isSystemGoroutine(newg, false) {
5363 sched.ngsys.Add(1)
5364 } else {
5365
5366 newg.bubble = callergp.bubble
5367 if mp.curg != nil {
5368 newg.labels = mp.curg.labels
5369 }
5370 if goroutineProfile.active {
5371
5372
5373
5374
5375
5376 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5377 }
5378 }
5379
5380 newg.trackingSeq = uint8(cheaprand())
5381 if newg.trackingSeq%gTrackingPeriod == 0 {
5382 newg.tracking = true
5383 }
5384 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5385
5386
5387
5388 trace := traceAcquire()
5389 var status uint32 = _Grunnable
5390 if parked {
5391 status = _Gwaiting
5392 newg.waitreason = waitreason
5393 }
5394 if pp.goidcache == pp.goidcacheend {
5395
5396
5397
5398 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5399 pp.goidcache -= _GoidCacheBatch - 1
5400 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5401 }
5402 newg.goid = pp.goidcache
5403 casgstatus(newg, _Gdead, status)
5404 pp.goidcache++
5405 newg.trace.reset()
5406 if trace.ok() {
5407 trace.GoCreate(newg, newg.startpc, parked)
5408 traceRelease(trace)
5409 }
5410
5411
5412 newg.fipsOnlyBypass = callergp.fipsOnlyBypass
5413
5414
5415 newg.ditWanted = callergp.ditWanted
5416
5417
5418 if raceenabled {
5419 newg.racectx = racegostart(callerpc)
5420 newg.raceignore = 0
5421 if newg.labels != nil {
5422
5423
5424 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5425 }
5426 }
5427 pp.goroutinesCreated++
5428 releasem(mp)
5429
5430 return newg
5431 }
5432
5433
5434
5435
5436 func saveAncestors(callergp *g) *[]ancestorInfo {
5437
5438 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5439 return nil
5440 }
5441 var callerAncestors []ancestorInfo
5442 if callergp.ancestors != nil {
5443 callerAncestors = *callergp.ancestors
5444 }
5445 n := int32(len(callerAncestors)) + 1
5446 if n > debug.tracebackancestors {
5447 n = debug.tracebackancestors
5448 }
5449 ancestors := make([]ancestorInfo, n)
5450 copy(ancestors[1:], callerAncestors)
5451
5452 var pcs [tracebackInnerFrames]uintptr
5453 npcs := gcallers(callergp, 0, pcs[:])
5454 ipcs := make([]uintptr, npcs)
5455 copy(ipcs, pcs[:])
5456 ancestors[0] = ancestorInfo{
5457 pcs: ipcs,
5458 goid: callergp.goid,
5459 gopc: callergp.gopc,
5460 }
5461
5462 ancestorsp := new([]ancestorInfo)
5463 *ancestorsp = ancestors
5464 return ancestorsp
5465 }
5466
5467
5468
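// gfput puts a dead goroutine on pp's local free list, freeing its stack if
// it is not the standard starting size. When the local list grows past 64
// entries, entries are moved to the global free lists (split by whether they
// still have a stack) until only 32 remain locally.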
5469 func gfput(pp *p, gp *g) {
5470 if readgstatus(gp) != _Gdead {
5471 throw("gfput: bad status (not Gdead)")
5472 }
5473
5474 stksize := gp.stack.hi - gp.stack.lo
5475
5476 if stksize != uintptr(startingStackSize) {
5477
5478 stackfree(gp.stack)
5479 gp.stack.lo = 0
5480 gp.stack.hi = 0
5481 gp.stackguard0 = 0
5482 if valgrindenabled {
5483 valgrindDeregisterStack(gp.valgrindStackID)
5484 gp.valgrindStackID = 0
5485 }
5486 }
5487
5488 pp.gFree.push(gp)
5489 if pp.gFree.size >= 64 {
5490 var (
5491 stackQ gQueue
5492 noStackQ gQueue
5493 )
5494 for pp.gFree.size >= 32 {
5495 gp := pp.gFree.pop()
5496 if gp.stack.lo == 0 {
5497 noStackQ.push(gp)
5498 } else {
5499 stackQ.push(gp)
5500 }
5501 }
5502 lock(&sched.gFree.lock)
5503 sched.gFree.noStack.pushAll(noStackQ)
5504 sched.gFree.stack.pushAll(stackQ)
5505 unlock(&sched.gFree.lock)
5506 }
5507 }
5508
5509
5510
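// gfget takes a g from pp's free list, refilling it from the global free
// lists if needed, and ensures the g has a stack of the standard starting
// size, allocating one if necessary. It returns nil if no free g is
// available.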
5511 func gfget(pp *p) *g {
5512 retry:
5513 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5514 lock(&sched.gFree.lock)
5515
5516 for pp.gFree.size < 32 {
5517
5518 gp := sched.gFree.stack.pop()
5519 if gp == nil {
5520 gp = sched.gFree.noStack.pop()
5521 if gp == nil {
5522 break
5523 }
5524 }
5525 pp.gFree.push(gp)
5526 }
5527 unlock(&sched.gFree.lock)
5528 goto retry
5529 }
5530 gp := pp.gFree.pop()
5531 if gp == nil {
5532 return nil
5533 }
5534 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5535
5536
5537
5538 systemstack(func() {
5539 stackfree(gp.stack)
5540 gp.stack.lo = 0
5541 gp.stack.hi = 0
5542 gp.stackguard0 = 0
5543 if valgrindenabled {
5544 valgrindDeregisterStack(gp.valgrindStackID)
5545 gp.valgrindStackID = 0
5546 }
5547 })
5548 }
5549 if gp.stack.lo == 0 {
5550
5551 systemstack(func() {
5552 gp.stack = stackalloc(startingStackSize)
5553 if valgrindenabled {
5554 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5555 }
5556 })
5557 gp.stackguard0 = gp.stack.lo + stackGuard
5558 } else {
5559 if raceenabled {
5560 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5561 }
5562 if msanenabled {
5563 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5564 }
5565 if asanenabled {
5566 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5567 }
5568 }
5569 return gp
5570 }
5571
5572
5573 func gfpurge(pp *p) {
5574 var (
5575 stackQ gQueue
5576 noStackQ gQueue
5577 )
5578 for !pp.gFree.empty() {
5579 gp := pp.gFree.pop()
5580 if gp.stack.lo == 0 {
5581 noStackQ.push(gp)
5582 } else {
5583 stackQ.push(gp)
5584 }
5585 }
5586 lock(&sched.gFree.lock)
5587 sched.gFree.noStack.pushAll(noStackQ)
5588 sched.gFree.stack.pushAll(stackQ)
5589 unlock(&sched.gFree.lock)
5590 }
5591
5592
5593 func Breakpoint() {
5594 breakpoint()
5595 }
5596
5597
5598
5599
5600
5601
5602 func dolockOSThread() {
5603 if GOARCH == "wasm" {
5604 return
5605 }
5606 gp := getg()
5607 gp.m.lockedg.set(gp)
5608 gp.lockedm.set(gp.m)
5609 }
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
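// LockOSThread wires the calling goroutine to its current operating system
// thread: until the matching number of UnlockOSThread calls, the goroutine
// runs only on that thread and the thread runs no other goroutines. A typical
// use, sketched here rather than taken from this file, is code that depends
// on thread-local state such as certain C libraries or OS APIs:
//
//	runtime.LockOSThread()
//	defer runtime.UnlockOSThread()
//	// ... thread-affine work here ...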
5627 func LockOSThread() {
5628 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5629
5630
5631
5632 startTemplateThread()
5633 }
5634 gp := getg()
5635 gp.m.lockedExt++
5636 if gp.m.lockedExt == 0 {
5637 gp.m.lockedExt--
5638 panic("LockOSThread nesting overflow")
5639 }
5640 dolockOSThread()
5641 }
5642
5643
5644 func lockOSThread() {
5645 getg().m.lockedInt++
5646 dolockOSThread()
5647 }
5648
5649
5650
5651
5652
5653
5654 func dounlockOSThread() {
5655 if GOARCH == "wasm" {
5656 return
5657 }
5658 gp := getg()
5659 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5660 return
5661 }
5662 gp.m.lockedg = 0
5663 gp.lockedm = 0
5664 }
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680 func UnlockOSThread() {
5681 gp := getg()
5682 if gp.m.lockedExt == 0 {
5683 return
5684 }
5685 gp.m.lockedExt--
5686 dounlockOSThread()
5687 }
5688
5689
5690 func unlockOSThread() {
5691 gp := getg()
5692 if gp.m.lockedInt == 0 {
5693 systemstack(badunlockosthread)
5694 }
5695 gp.m.lockedInt--
5696 dounlockOSThread()
5697 }
5698
5699 func badunlockosthread() {
5700 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5701 }
5702
5703 func gcount(includeSys bool) int32 {
5704 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5705 if !includeSys {
5706 n -= sched.ngsys.Load()
5707 }
5708 for _, pp := range allp {
5709 n -= pp.gFree.size
5710 }
5711
5712
5713
5714 if n < 1 {
5715 n = 1
5716 }
5717 return n
5718 }
5719
5720
5721
5722
5723
5724 func goroutineleakcount() int {
5725 return work.goroutineLeak.count
5726 }
5727
5728 func mcount() int32 {
5729 return int32(sched.mnext - sched.nmfreed)
5730 }
5731
5732 var prof struct {
5733 signalLock atomic.Uint32
5734
5735
5736
5737 hz atomic.Int32
5738 }
5739
5740 func _System() { _System() }
5741 func _ExternalCode() { _ExternalCode() }
5742 func _LostExternalCode() { _LostExternalCode() }
5743 func _GC() { _GC() }
5744 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5745 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5746 func _VDSO() { _VDSO() }
5747
5748
5749
5750
5751
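// sigprof is called on each SIGPROF (or equivalent profiling interrupt) to
// record a CPU profile sample: it unwinds the interrupted stack, handling
// cgo, libcall and VDSO frames specially, and hands the PCs to the CPU
// profiler and the execution tracer.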
5752 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5753 if prof.hz.Load() == 0 {
5754 return
5755 }
5756
5757
5758
5759
5760 if mp != nil && mp.profilehz == 0 {
5761 return
5762 }
5763
5764
5765
5766
5767
5768
5769
5770 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5771 if f := findfunc(pc); f.valid() {
5772 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5773 cpuprof.lostAtomic++
5774 return
5775 }
5776 }
5777 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5778
5779
5780
5781 cpuprof.lostAtomic++
5782 return
5783 }
5784 }
5785
5786
5787
5788
5789
5790
5791
5792 getg().m.mallocing++
5793
5794 var u unwinder
5795 var stk [maxCPUProfStack]uintptr
5796 n := 0
5797 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5798 cgoOff := 0
5799
5800
5801
5802
5803
5804 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5805 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5806 cgoOff++
5807 }
5808 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5809 mp.cgoCallers[0] = 0
5810 }
5811
5812
5813 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5814 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5815
5816
5817 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5818 } else if mp != nil && mp.vdsoSP != 0 {
5819
5820
5821 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5822 } else {
5823 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5824 }
5825 n += tracebackPCs(&u, 0, stk[n:])
5826
5827 if n <= 0 {
5828
5829
5830 n = 2
5831 if inVDSOPage(pc) {
5832 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5833 } else if pc > firstmoduledata.etext {
5834
5835 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5836 }
5837 stk[0] = pc
5838 if mp.preemptoff != "" {
5839 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5840 } else {
5841 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5842 }
5843 }
5844
5845 if prof.hz.Load() != 0 {
5846
5847
5848
5849 var tagPtr *unsafe.Pointer
5850 if gp != nil && gp.m != nil && gp.m.curg != nil {
5851 tagPtr = &gp.m.curg.labels
5852 }
5853 cpuprof.add(tagPtr, stk[:n])
5854
5855 gprof := gp
5856 var mp *m
5857 var pp *p
5858 if gp != nil && gp.m != nil {
5859 if gp.m.curg != nil {
5860 gprof = gp.m.curg
5861 }
5862 mp = gp.m
5863 pp = gp.m.p.ptr()
5864 }
5865 traceCPUSample(gprof, mp, pp, stk[:n])
5866 }
5867 getg().m.mallocing--
5868 }
5869
5870
5871
5872 func setcpuprofilerate(hz int32) {
5873
5874 if hz < 0 {
5875 hz = 0
5876 }
5877
5878
5879
5880 gp := getg()
5881 gp.m.locks++
5882
5883
5884
5885
5886 setThreadCPUProfiler(0)
5887
5888 for !prof.signalLock.CompareAndSwap(0, 1) {
5889 osyield()
5890 }
5891 if prof.hz.Load() != hz {
5892 setProcessCPUProfiler(hz)
5893 prof.hz.Store(hz)
5894 }
5895 prof.signalLock.Store(0)
5896
5897 lock(&sched.lock)
5898 sched.profilehz = hz
5899 unlock(&sched.lock)
5900
5901 if hz != 0 {
5902 setThreadCPUProfiler(hz)
5903 }
5904
5905 gp.m.locks--
5906 }
5907
5908
5909
5910 func (pp *p) init(id int32) {
5911 pp.id = id
5912 pp.gcw.id = id
5913 pp.status = _Pgcstop
5914 pp.sudogcache = pp.sudogbuf[:0]
5915 pp.deferpool = pp.deferpoolbuf[:0]
5916 pp.wbBuf.reset()
5917 if pp.mcache == nil {
5918 if id == 0 {
5919 if mcache0 == nil {
5920 throw("missing mcache?")
5921 }
5922
5923
5924 pp.mcache = mcache0
5925 } else {
5926 pp.mcache = allocmcache()
5927 }
5928 }
5929 if raceenabled && pp.raceprocctx == 0 {
5930 if id == 0 {
5931 pp.raceprocctx = raceprocctx0
5932 raceprocctx0 = 0
5933 } else {
5934 pp.raceprocctx = raceproccreate()
5935 }
5936 }
5937 lockInit(&pp.timers.mu, lockRankTimers)
5938
5939
5940
5941 timerpMask.set(id)
5942
5943
5944 idlepMask.clear(id)
5945 }
5946
5947
5948
5949
5950
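// destroy releases all resources owned by pp and moves it to _Pdead. It may
// only be called with sched.lock held and the world stopped; any remaining
// runnable goroutines are moved to the global queue and pp's timers are taken
// over by the caller's P.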
5951 func (pp *p) destroy() {
5952 assertLockHeld(&sched.lock)
5953 assertWorldStopped()
5954
5955
5956 for pp.runqhead != pp.runqtail {
5957
5958 pp.runqtail--
5959 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5960
5961 globrunqputhead(gp)
5962 }
5963 if pp.runnext != 0 {
5964 globrunqputhead(pp.runnext.ptr())
5965 pp.runnext = 0
5966 }
5967
5968
5969 getg().m.p.ptr().timers.take(&pp.timers)
5970
5971
5972
5973 if phase := gcphase; phase != _GCoff {
5974 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5975 throw("P destroyed while GC is running")
5976 }
5977
5978 pp.gcw.spanq.destroy()
5979
5980 clear(pp.sudogbuf[:])
5981 pp.sudogcache = pp.sudogbuf[:0]
5982 pp.pinnerCache = nil
5983 clear(pp.deferpoolbuf[:])
5984 pp.deferpool = pp.deferpoolbuf[:0]
5985 systemstack(func() {
5986 for i := 0; i < pp.mspancache.len; i++ {
5987
5988 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5989 }
5990 pp.mspancache.len = 0
5991 lock(&mheap_.lock)
5992 pp.pcache.flush(&mheap_.pages)
5993 unlock(&mheap_.lock)
5994 })
5995 freemcache(pp.mcache)
5996 pp.mcache = nil
5997 gfpurge(pp)
5998 if raceenabled {
5999 if pp.timers.raceCtx != 0 {
6000
6001
6002
6003
6004
6005 mp := getg().m
6006 phold := mp.p.ptr()
6007 mp.p.set(pp)
6008
6009 racectxend(pp.timers.raceCtx)
6010 pp.timers.raceCtx = 0
6011
6012 mp.p.set(phold)
6013 }
6014 raceprocdestroy(pp.raceprocctx)
6015 pp.raceprocctx = 0
6016 }
6017 pp.gcAssistTime = 0
6018 gcCleanups.queued += pp.cleanupsQueued
6019 pp.cleanupsQueued = 0
6020 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
6021 pp.goroutinesCreated = 0
6022 pp.xRegs.free()
6023 pp.status = _Pdead
6024 }
6025
6026
6027
6028
6029
6030
6031
6032
6033
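// procresize changes the number of processors to nprocs, growing or shrinking
// allp and the P masks as needed. sched.lock must be held and the world must
// be stopped. It returns the list of Ps that have local work; the caller is
// responsible for starting them.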
6034 func procresize(nprocs int32) *p {
6035 assertLockHeld(&sched.lock)
6036 assertWorldStopped()
6037
6038 old := gomaxprocs
6039 if old < 0 || nprocs <= 0 {
6040 throw("procresize: invalid arg")
6041 }
6042 trace := traceAcquire()
6043 if trace.ok() {
6044 trace.Gomaxprocs(nprocs)
6045 traceRelease(trace)
6046 }
6047
6048
6049 now := nanotime()
6050 if sched.procresizetime != 0 {
6051 sched.totaltime += int64(old) * (now - sched.procresizetime)
6052 }
6053 sched.procresizetime = now
6054
6055
6056 if nprocs > int32(len(allp)) {
6057
6058
6059 lock(&allpLock)
6060 if nprocs <= int32(cap(allp)) {
6061 allp = allp[:nprocs]
6062 } else {
6063 nallp := make([]*p, nprocs)
6064
6065
6066 copy(nallp, allp[:cap(allp)])
6067 allp = nallp
6068 }
6069
6070 idlepMask = idlepMask.resize(nprocs)
6071 timerpMask = timerpMask.resize(nprocs)
6072 work.spanqMask = work.spanqMask.resize(nprocs)
6073 unlock(&allpLock)
6074 }
6075
6076
6077 for i := old; i < nprocs; i++ {
6078 pp := allp[i]
6079 if pp == nil {
6080 pp = new(p)
6081 }
6082 pp.init(i)
6083 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
6084 }
6085
6086 gp := getg()
6087 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
6088
6089 gp.m.p.ptr().status = _Prunning
6090 gp.m.p.ptr().mcache.prepareForSweep()
6091 } else {
6092
6093
6094
6095
6096
6097 if gp.m.p != 0 {
6098 trace := traceAcquire()
6099 if trace.ok() {
6100
6101
6102
6103 trace.GoSched()
6104 trace.ProcStop(gp.m.p.ptr())
6105 traceRelease(trace)
6106 }
6107 gp.m.p.ptr().m = 0
6108 }
6109 gp.m.p = 0
6110 pp := allp[0]
6111 pp.m = 0
6112 pp.status = _Pidle
6113 acquirep(pp)
6114 trace := traceAcquire()
6115 if trace.ok() {
6116 trace.GoStart()
6117 traceRelease(trace)
6118 }
6119 }
6120
6121
6122 mcache0 = nil
6123
6124
6125 for i := nprocs; i < old; i++ {
6126 pp := allp[i]
6127 pp.destroy()
6128
6129 }
6130
6131
6132 if int32(len(allp)) != nprocs {
6133 lock(&allpLock)
6134 allp = allp[:nprocs]
6135 idlepMask = idlepMask.resize(nprocs)
6136 timerpMask = timerpMask.resize(nprocs)
6137 work.spanqMask = work.spanqMask.resize(nprocs)
6138 unlock(&allpLock)
6139 }
6140
6141
6142 var runnablePs *p
6143 var runnablePsNeedM *p
6144 var idlePs *p
6145 for i := nprocs - 1; i >= 0; i-- {
6146 pp := allp[i]
6147 if gp.m.p.ptr() == pp {
6148 continue
6149 }
6150 pp.status = _Pidle
6151 if runqempty(pp) {
6152 pp.link.set(idlePs)
6153 idlePs = pp
6154 continue
6155 }
6156
6157
6158
6159
6160
6161
6162
6163
6164 var mp *m
6165 if oldm := pp.oldm.get(); oldm != nil {
6166
6167 mp = mgetSpecific(oldm)
6168 }
6169 if mp == nil {
6170
6171 pp.link.set(runnablePsNeedM)
6172 runnablePsNeedM = pp
6173 continue
6174 }
6175 pp.m.set(mp)
6176 pp.link.set(runnablePs)
6177 runnablePs = pp
6178 }
6179
6180
6181 for runnablePsNeedM != nil {
6182 pp := runnablePsNeedM
6183 runnablePsNeedM = pp.link.ptr()
6184
6185 mp := mget()
6186 pp.m.set(mp)
6187 pp.link.set(runnablePs)
6188 runnablePs = pp
6189 }
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204
6205
6206
6207
6208
6209
6210
6211
6212
6213
6214
6215 if gcBlackenEnabled != 0 {
6216 for idlePs != nil {
6217 pp := idlePs
6218
6219 ok, _ := gcController.assignWaitingGCWorker(pp, now)
6220 if !ok {
6221
6222 break
6223 }
6224
6225
6226
6227
6228
6229
6230
6231
6232 idlePs = pp.link.ptr()
6233 mp := mget()
6234 pp.m.set(mp)
6235 pp.link.set(runnablePs)
6236 runnablePs = pp
6237 }
6238 }
6239
6240
6241 for idlePs != nil {
6242 pp := idlePs
6243 idlePs = pp.link.ptr()
6244 pidleput(pp, now)
6245 }
6246
6247 stealOrder.reset(uint32(nprocs))
6248 var int32p *int32 = &gomaxprocs
6249 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6250 if old != nprocs {
6251
6252 gcCPULimiter.resetCapacity(now, nprocs)
6253 }
6254 return runnablePs
6255 }
6256
6257
6258
6259
6260
6261
6262
6263 func acquirep(pp *p) {
6264
6265 acquirepNoTrace(pp)
6266
6267
6268 trace := traceAcquire()
6269 if trace.ok() {
6270 trace.ProcStart()
6271 traceRelease(trace)
6272 }
6273 }
6274
6275
6276
6277
6278 func acquirepNoTrace(pp *p) {
6279
6280 wirep(pp)
6281
6282
6283
6284
6285
6286
6287 pp.oldm = pp.m.ptr().self
6288
6289
6290
6291 pp.mcache.prepareForSweep()
6292 }
6293
6294
6295
6296
6297
6298
6299
6300 func wirep(pp *p) {
6301 gp := getg()
6302
6303 if gp.m.p != 0 {
6304
6305
6306 systemstack(func() {
6307 throw("wirep: already in go")
6308 })
6309 }
6310 if pp.m != 0 || pp.status != _Pidle {
6311
6312
6313 systemstack(func() {
6314 id := int64(0)
6315 if pp.m != 0 {
6316 id = pp.m.ptr().id
6317 }
6318 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6319 throw("wirep: invalid p state")
6320 })
6321 }
6322 gp.m.p.set(pp)
6323 pp.m.set(gp.m)
6324 pp.status = _Prunning
6325 }
6326
6327
6328 func releasep() *p {
6329 trace := traceAcquire()
6330 if trace.ok() {
6331 trace.ProcStop(getg().m.p.ptr())
6332 traceRelease(trace)
6333 }
6334 return releasepNoTrace()
6335 }
6336
6337
6338 func releasepNoTrace() *p {
6339 gp := getg()
6340
6341 if gp.m.p == 0 {
6342 throw("releasep: invalid arg")
6343 }
6344 pp := gp.m.p.ptr()
6345 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6346 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6347 throw("releasep: invalid p state")
6348 }
6349
6350
6351 gcController.releaseNextGCMarkWorker(pp)
6352
6353 gp.m.p = 0
6354 pp.m = 0
6355 pp.status = _Pidle
6356 return pp
6357 }
6358
6359 func incidlelocked(v int32) {
6360 lock(&sched.lock)
6361 sched.nmidlelocked += v
6362 if v > 0 {
6363 checkdead()
6364 }
6365 unlock(&sched.lock)
6366 }
6367
6368
6369
6370
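// checkdead reports a deadlock when there are no running or runnable
// goroutines and no timers that could produce one, throwing or calling fatal.
// sched.lock must be held. The check is skipped for c-shared and c-archive
// builds, where having no runnable Go code is normal.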
6371 func checkdead() {
6372 assertLockHeld(&sched.lock)
6373
6374
6375
6376
6377
6378
6379 if (islibrary || isarchive) && GOARCH != "wasm" {
6380 return
6381 }
6382
6383
6384
6385
6386
6387 if panicking.Load() > 0 {
6388 return
6389 }
6390
6391
6392
6393
6394
6395 var run0 int32
6396 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6397 run0 = 1
6398 }
6399
6400 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6401 if run > run0 {
6402 return
6403 }
6404 if run < 0 {
6405 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6406 unlock(&sched.lock)
6407 throw("checkdead: inconsistent counts")
6408 }
6409
6410 grunning := 0
6411 forEachG(func(gp *g) {
6412 if isSystemGoroutine(gp, false) {
6413 return
6414 }
6415 s := readgstatus(gp)
6416 switch s &^ _Gscan {
6417 case _Gwaiting,
6418 _Gpreempted:
6419 grunning++
6420 case _Grunnable,
6421 _Grunning,
6422 _Gsyscall:
6423 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6424 unlock(&sched.lock)
6425 throw("checkdead: runnable g")
6426 }
6427 })
6428 if grunning == 0 {
6429 unlock(&sched.lock)
6430 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6431 }
6432
6433
6434 if faketime != 0 {
6435 if when := timeSleepUntil(); when < maxWhen {
6436 faketime = when
6437
6438
6439 pp, _ := pidleget(faketime)
6440 if pp == nil {
6441
6442
6443 unlock(&sched.lock)
6444 throw("checkdead: no p for timer")
6445 }
6446 mp := mget()
6447 if mp == nil {
6448
6449
6450 unlock(&sched.lock)
6451 throw("checkdead: no m for timer")
6452 }
6453
6454
6455
6456 sched.nmspinning.Add(1)
6457 mp.spinning = true
6458 mp.nextp.set(pp)
6459 notewakeup(&mp.park)
6460 return
6461 }
6462 }
6463
6464
6465 for _, pp := range allp {
6466 if len(pp.timers.heap) > 0 {
6467 return
6468 }
6469 }
6470
6471 unlock(&sched.lock)
6472 fatal("all goroutines are asleep - deadlock!")
6473 }
6474
6475
6476
6477
6478
6479
6480 var forcegcperiod int64 = 2 * 60 * 1e9
6481
6482
6483
6484
6485 const haveSysmon = GOARCH != "wasm"
6486
6487
6488
6489
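// sysmon runs without a P on a dedicated M. With a delay that backs off from
// 20µs to 10ms while idle, it polls the network if no one else has recently,
// retakes Ps stuck in long syscalls and preempts long-running goroutines
// (retake), triggers the periodic GC, and wakes the scavenger when asked.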
6490 func sysmon() {
6491 lock(&sched.lock)
6492 sched.nmsys++
6493 checkdead()
6494 unlock(&sched.lock)
6495
6496 lastgomaxprocs := int64(0)
6497 lasttrace := int64(0)
6498 idle := 0
6499 delay := uint32(0)
6500
6501 for {
6502 if idle == 0 {
6503 delay = 20
6504 } else if idle > 50 {
6505 delay *= 2
6506 }
6507 if delay > 10*1000 {
6508 delay = 10 * 1000
6509 }
6510 usleep(delay)
6511
6512
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
6525
6526
6527 now := nanotime()
6528 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6529 lock(&sched.lock)
6530 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6531 syscallWake := false
6532 next := timeSleepUntil()
6533 if next > now {
6534 sched.sysmonwait.Store(true)
6535 unlock(&sched.lock)
6536
6537
6538 sleep := forcegcperiod / 2
6539 if next-now < sleep {
6540 sleep = next - now
6541 }
6542 shouldRelax := sleep >= osRelaxMinNS
6543 if shouldRelax {
6544 osRelax(true)
6545 }
6546 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6547 if shouldRelax {
6548 osRelax(false)
6549 }
6550 lock(&sched.lock)
6551 sched.sysmonwait.Store(false)
6552 noteclear(&sched.sysmonnote)
6553 }
6554 if syscallWake {
6555 idle = 0
6556 delay = 20
6557 }
6558 }
6559 unlock(&sched.lock)
6560 }
6561
6562 lock(&sched.sysmonlock)
6563
6564
6565 now = nanotime()
6566
6567
6568 if *cgo_yield != nil {
6569 asmcgocall(*cgo_yield, nil)
6570 }
6571
6572 lastpoll := sched.lastpoll.Load()
6573 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6574 sched.lastpoll.CompareAndSwap(lastpoll, now)
6575 list, delta := netpoll(0)
6576 if !list.empty() {
6577
6578
6579
6580
6581
6582
6583
6584 incidlelocked(-1)
6585 injectglist(&list)
6586 incidlelocked(1)
6587 netpollAdjustWaiters(delta)
6588 }
6589 }
6590
6591 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6592 sysmonUpdateGOMAXPROCS()
6593 lastgomaxprocs = now
6594 }
6595 if scavenger.sysmonWake.Load() != 0 {
6596
6597 scavenger.wake()
6598 }
6599
6600
6601 if retake(now) != 0 {
6602 idle = 0
6603 } else {
6604 idle++
6605 }
6606
6607 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6608 lock(&forcegc.lock)
6609 forcegc.idle.Store(false)
6610 var list gList
6611 list.push(forcegc.g)
6612 injectglist(&list)
6613 unlock(&forcegc.lock)
6614 }
6615 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6616 lasttrace = now
6617 schedtrace(debug.scheddetail > 0)
6618 }
6619 unlock(&sched.sysmonlock)
6620 }
6621 }
6622
6623 type sysmontick struct {
6624 schedtick uint32
6625 syscalltick uint32
6626 schedwhen int64
6627 syscallwhen int64
6628 }
6629
6630
6631
6632 const forcePreemptNS = 10 * 1000 * 1000
6633
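// retake, called from sysmon, preempts goroutines that have run on the same P
// for more than forcePreemptNS and takes Ps away from Ms blocked in syscalls
// when the P has local work, there are no spinning Ms or idle Ps, or the
// syscall has lasted long enough. It returns the number of Ps retaken.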
6634 func retake(now int64) uint32 {
6635 n := 0
6636
6637
6638 lock(&allpLock)
6639
6640
6641
6642 for i := 0; i < len(allp); i++ {
6643
6644
6645
6646
6647
6648
6649
6650
6651 pp := allp[i]
6652 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6653
6654
6655 continue
6656 }
6657 pd := &pp.sysmontick
6658 sysretake := false
6659
6660
6661
6662
6663
6664 schedt := int64(pp.schedtick)
6665 if int64(pd.schedtick) != schedt {
6666 pd.schedtick = uint32(schedt)
6667 pd.schedwhen = now
6668 } else if pd.schedwhen+forcePreemptNS <= now {
6669 preemptone(pp)
6670
6671
6672
6673
6674 sysretake = true
6675 }
6676
6677
6678 unlock(&allpLock)
6679
6680
6681
6682
6683
6684
6685
6686
6687 incidlelocked(-1)
6688
6689
6690 thread, ok := setBlockOnExitSyscall(pp)
6691 if !ok {
6692
6693 goto done
6694 }
6695
6696
6697 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6698 pd.syscalltick = uint32(syst)
6699 pd.syscallwhen = now
6700 thread.resume()
6701 goto done
6702 }
6703
6704
6705
6706
6707 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6708 thread.resume()
6709 goto done
6710 }
6711
6712
6713
6714 thread.takeP()
6715 thread.resume()
6716 n++
6717
6718
6719 handoffp(pp)
6720
6721
6722
6723 done:
6724 incidlelocked(1)
6725 lock(&allpLock)
6726 }
6727 unlock(&allpLock)
6728 return uint32(n)
6729 }
6730
6731
6732
6733 type syscallingThread struct {
6734 gp *g
6735 mp *m
6736 pp *p
6737 status uint32
6738 }
6739
6740
6741
6742
6743
6744
6745
6746
6747
6748
6749
6750
6751
6752
6753
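// setBlockOnExitSyscall attempts to pin the goroutine currently in a syscall
// (or an idle extra-M goroutine) on pp's M by setting the _Gscan bit on its
// status, which prevents exitsyscall from completing until resume is called.
// On success it returns a handle the caller can use to take the P (takeP or
// gcstopP) before resuming the thread.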
6754 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6755 if pp.status != _Prunning {
6756 return syscallingThread{}, false
6757 }
6758
6759
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769 mp := pp.m.ptr()
6770 if mp == nil {
6771
6772 return syscallingThread{}, false
6773 }
6774 gp := mp.curg
6775 if gp == nil {
6776
6777 return syscallingThread{}, false
6778 }
6779 status := readgstatus(gp) &^ _Gscan
6780
6781
6782
6783
6784 if status != _Gsyscall && status != _Gdeadextra {
6785
6786 return syscallingThread{}, false
6787 }
6788 if !castogscanstatus(gp, status, status|_Gscan) {
6789
6790 return syscallingThread{}, false
6791 }
6792 if gp.m != mp || gp.m.p.ptr() != pp {
6793
6794 casfrom_Gscanstatus(gp, status|_Gscan, status)
6795 return syscallingThread{}, false
6796 }
6797 return syscallingThread{gp, mp, pp, status}, true
6798 }
6799
6800
6801
6802
6803
6804 func (s syscallingThread) gcstopP() {
6805 assertLockHeld(&sched.lock)
6806
6807 s.releaseP(_Pgcstop)
6808 s.pp.gcStopTime = nanotime()
6809 sched.stopwait--
6810 }
6811
6812
6813
6814 func (s syscallingThread) takeP() {
6815 s.releaseP(_Pidle)
6816 }
6817
6818
6819
6820
6821 func (s syscallingThread) releaseP(state uint32) {
6822 if state != _Pidle && state != _Pgcstop {
6823 throw("attempted to release P into a bad state")
6824 }
6825 trace := traceAcquire()
6826 s.pp.m = 0
6827 s.mp.p = 0
6828 atomic.Store(&s.pp.status, state)
6829 if trace.ok() {
6830 trace.ProcSteal(s.pp)
6831 traceRelease(trace)
6832 }
6833 addGSyscallNoP(s.mp)
6834 s.pp.syscalltick++
6835 }
6836
6837
6838 func (s syscallingThread) resume() {
6839 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6840 }
6841
6842
6843
6844
6845
6846
6847 func preemptall() bool {
6848 res := false
6849 for _, pp := range allp {
6850 if pp.status != _Prunning {
6851 continue
6852 }
6853 if preemptone(pp) {
6854 res = true
6855 }
6856 }
6857 return res
6858 }
6859
6860
6861
6862
6863
6864
6865
6866
6867
6868
6869
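// preemptone requests preemption of the goroutine currently running on pp by
// poisoning its stack guard (cooperative preemption) and, where supported,
// sending an asynchronous preemption signal. The request is best effort and
// may be missed, so callers must not rely on it taking effect.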
6870 func preemptone(pp *p) bool {
6871 mp := pp.m.ptr()
6872 if mp == nil || mp == getg().m {
6873 return false
6874 }
6875 gp := mp.curg
6876 if gp == nil || gp == mp.g0 {
6877 return false
6878 }
6879 if readgstatus(gp)&^_Gscan == _Gsyscall {
6880
6881 return false
6882 }
6883
6884 gp.preempt = true
6885
6886
6887
6888
6889
6890 gp.stackguard0 = stackPreempt
6891
6892
6893 if preemptMSupported && debug.asyncpreemptoff == 0 {
6894 pp.preempt = true
6895 preemptM(mp)
6896 }
6897
6898 return true
6899 }
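// Note (illustrative): both preemption paths set up above can be observed from
// outside the runtime. The documented GODEBUG setting below disables the
// signal-based preemptM path, leaving only the cooperative stackguard0 check
// at function prologues:
//
//	GODEBUG=asyncpreemptoff=1 ./yourprogram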
6900
6901 var starttime int64
6902
6903 func schedtrace(detailed bool) {
6904 now := nanotime()
6905 if starttime == 0 {
6906 starttime = now
6907 }
6908
6909 lock(&sched.lock)
6910 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6911 if detailed {
6912 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6913 }
6914
6915
6916
6917 for i, pp := range allp {
6918 h := atomic.Load(&pp.runqhead)
6919 t := atomic.Load(&pp.runqtail)
6920 if detailed {
6921 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6922 mp := pp.m.ptr()
6923 if mp != nil {
6924 print(mp.id)
6925 } else {
6926 print("nil")
6927 }
6928 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6929 } else {
6930
6931
6932 print(" ")
6933 if i == 0 {
6934 print("[ ")
6935 }
6936 print(t - h)
6937 if i == len(allp)-1 {
6938 print(" ]")
6939 }
6940 }
6941 }
6942
6943 if !detailed {
6944
6945 print(" schedticks=[ ")
6946 for _, pp := range allp {
6947 print(pp.schedtick)
6948 print(" ")
6949 }
6950 print("]\n")
6951 }
6952
6953 if !detailed {
6954 unlock(&sched.lock)
6955 return
6956 }
6957
6958 for mp := allm; mp != nil; mp = mp.alllink {
6959 pp := mp.p.ptr()
6960 print(" M", mp.id, ": p=")
6961 if pp != nil {
6962 print(pp.id)
6963 } else {
6964 print("nil")
6965 }
6966 print(" curg=")
6967 if mp.curg != nil {
6968 print(mp.curg.goid)
6969 } else {
6970 print("nil")
6971 }
6972 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6973 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6974 print(lockedg.goid)
6975 } else {
6976 print("nil")
6977 }
6978 print("\n")
6979 }
6980
6981 forEachG(func(gp *g) {
6982 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6983 if gp.m != nil {
6984 print(gp.m.id)
6985 } else {
6986 print("nil")
6987 }
6988 print(" lockedm=")
6989 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6990 print(lockedm.id)
6991 } else {
6992 print("nil")
6993 }
6994 print("\n")
6995 })
6996 unlock(&sched.lock)
6997 }
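// Example (values invented; format taken from the print calls above): with
// GODEBUG=schedtrace=1000 the runtime prints one summary line per second, and
// GODEBUG=schedtrace=1000,scheddetail=1 switches to the detailed per-P, per-M
// and per-G dump:
//
//	SCHED 2006ms: gomaxprocs=8 idleprocs=5 threads=12 spinningthreads=1 needspinning=0 idlethreads=4 runqueue=2 [ 0 1 0 3 0 0 0 0 ] schedticks=[ 41 7 12 9 0 2 0 1 ]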
6998
6999 type updateMaxProcsGState struct {
7000 lock mutex
7001 g *g
7002 idle atomic.Bool
7003
7004
7005 procs int32
7006 }
7007
7008 var (
7009
7010
7011 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
7012
7013
7014
7015 updateMaxProcsG updateMaxProcsGState
7016
7017
7018
7019
7020
7021
7022
7023
7024
7025
7026
7027
7028
7029
7030
7031
7032
7033
7034
7035
7036
7037
7038
7039
7040
7041
7042
7043
7044
7045
7046
7047
7048
7049
7050
7051
7052
7053
7054
7055
7056
7057
7058
7059
7060
7061
7062
7063
7064 computeMaxProcsLock mutex
7065 )
7066
7067
7068
7069
7070 func defaultGOMAXPROCSUpdateEnable() {
7071 if debug.updatemaxprocs == 0 {
7072
7073
7074
7075
7076
7077
7078
7079
7080
7081
7082
7083 updatemaxprocs.IncNonDefault()
7084 return
7085 }
7086
7087 go updateMaxProcsGoroutine()
7088 }
7089
7090 func updateMaxProcsGoroutine() {
7091 updateMaxProcsG.g = getg()
7092 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
7093 for {
7094 lock(&updateMaxProcsG.lock)
7095 if updateMaxProcsG.idle.Load() {
7096 throw("updateMaxProcsGoroutine: phase error")
7097 }
7098 updateMaxProcsG.idle.Store(true)
7099 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
7100
7101
7102 stw := stopTheWorldGC(stwGOMAXPROCS)
7103
7104
7105 lock(&sched.lock)
7106 custom := sched.customGOMAXPROCS
7107 unlock(&sched.lock)
7108 if custom {
7109 startTheWorldGC(stw)
7110 return
7111 }
7112
7113
7114
7115
7116
7117 newprocs = updateMaxProcsG.procs
7118 lock(&sched.lock)
7119 sched.customGOMAXPROCS = false
7120 unlock(&sched.lock)
7121
7122 startTheWorldGC(stw)
7123 }
7124 }
7125
7126 func sysmonUpdateGOMAXPROCS() {
7127
7128 lock(&computeMaxProcsLock)
7129
7130
7131 lock(&sched.lock)
7132 custom := sched.customGOMAXPROCS
7133 curr := gomaxprocs
7134 unlock(&sched.lock)
7135 if custom {
7136 unlock(&computeMaxProcsLock)
7137 return
7138 }
7139
7140
7141 procs := defaultGOMAXPROCS(0)
7142 unlock(&computeMaxProcsLock)
7143 if procs == curr {
7144
7145 return
7146 }
7147
7148
7149
7150
7151 if updateMaxProcsG.idle.Load() {
7152 lock(&updateMaxProcsG.lock)
7153 updateMaxProcsG.procs = procs
7154 updateMaxProcsG.idle.Store(false)
7155 var list gList
7156 list.push(updateMaxProcsG.g)
7157 injectglist(&list)
7158 unlock(&updateMaxProcsG.lock)
7159 }
7160 }
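// Usage note (illustrative): the machinery above keeps GOMAXPROCS tracking the
// default (for example after a container CPU limit changes) until user code
// sets an explicit value, and GODEBUG=updatemaxprocs=0 turns the updates off
// entirely:
//
//	n := runtime.GOMAXPROCS(0) // query only; does not mark the value as custom
//	runtime.GOMAXPROCS(4)      // explicit value; sysmon stops overriding it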
7161
7162
7163
7164
7165
7166
7167 func schedEnableUser(enable bool) {
7168 lock(&sched.lock)
7169 if sched.disable.user == !enable {
7170 unlock(&sched.lock)
7171 return
7172 }
7173 sched.disable.user = !enable
7174 if enable {
7175 n := sched.disable.runnable.size
7176 globrunqputbatch(&sched.disable.runnable)
7177 unlock(&sched.lock)
7178 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7179 startm(nil, false, false)
7180 }
7181 } else {
7182 unlock(&sched.lock)
7183 }
7184 }
7185
7186
7187
7188
7189
7190 func schedEnabled(gp *g) bool {
7191 assertLockHeld(&sched.lock)
7192
7193 if sched.disable.user {
7194 return isSystemGoroutine(gp, true)
7195 }
7196 return true
7197 }
7198
7199 // mput puts mp on the midle list.
7200 // sched.lock must be held.
7201 // May run during STW, so write barriers are not allowed.
7202 //
7203 //go:nowritebarrierrec
7204 func mput(mp *m) {
7205 assertLockHeld(&sched.lock)
7206
7207 sched.midle.push(unsafe.Pointer(mp))
7208 sched.nmidle++
7209 checkdead()
7210 }
7211
7212 // mget tries to get an m from the midle list.
7213 // sched.lock must be held.
7214 // May run during STW, so write barriers are not allowed.
7215 //
7216 //go:nowritebarrierrec
7217 func mget() *m {
7218 assertLockHeld(&sched.lock)
7219
7220 mp := (*m)(sched.midle.pop())
7221 if mp != nil {
7222 sched.nmidle--
7223 }
7224 return mp
7225 }
7226
7227
7228
7229
7230
7231
7232
7233
7234 func mgetSpecific(mp *m) *m {
7235 assertLockHeld(&sched.lock)
7236
7237 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7238
7239 return nil
7240 }
7241
7242 sched.midle.remove(unsafe.Pointer(mp))
7243 sched.nmidle--
7244
7245 return mp
7246 }
7247
7248 // globrunqput puts gp on the global runnable queue.
7249 // sched.lock must be held.
7250 // May run during STW, so write barriers are not allowed.
7251 //
7252 //go:nowritebarrierrec
7253 func globrunqput(gp *g) {
7254 assertLockHeld(&sched.lock)
7255
7256 sched.runq.pushBack(gp)
7257 }
7258
7259
7260
7261
7262
7263
7264 func globrunqputhead(gp *g) {
7265 assertLockHeld(&sched.lock)
7266
7267 sched.runq.push(gp)
7268 }
7269
7270
7271
7272
7273
7274
7275
7276 func globrunqputbatch(batch *gQueue) {
7277 assertLockHeld(&sched.lock)
7278
7279 sched.runq.pushBackAll(*batch)
7280 *batch = gQueue{}
7281 }
7282
7283
7284
7285 func globrunqget() *g {
7286 assertLockHeld(&sched.lock)
7287
7288 if sched.runq.size == 0 {
7289 return nil
7290 }
7291
7292 return sched.runq.pop()
7293 }
7294
7295
7296
7297 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7298 assertLockHeld(&sched.lock)
7299
7300 if sched.runq.size == 0 {
7301 return
7302 }
7303
7304 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7305
7306 gp = sched.runq.pop()
7307 n--
7308
7309 for ; n > 0; n-- {
7310 gp1 := sched.runq.pop()
7311 q.pushBack(gp1)
7312 }
7313 return
7314 }
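// Worked example of the batch bound above (illustrative numbers): with
// sched.runq.size = 100 and gomaxprocs = 4, the cap is
// min(n, 100, 100/4+1) = min(n, 100, 26), so a caller asking for 64
// goroutines gets 26 of them: one returned directly in gp and 25 queued in q.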
7315
7316 // pMask is an atomic bitstring with one bit per P.
7317 type pMask []uint32
7318
7319
7320 func (p pMask) read(id uint32) bool {
7321 word := id / 32
7322 mask := uint32(1) << (id % 32)
7323 return (atomic.Load(&p[word]) & mask) != 0
7324 }
7325
7326
7327 func (p pMask) set(id int32) {
7328 word := id / 32
7329 mask := uint32(1) << (id % 32)
7330 atomic.Or(&p[word], mask)
7331 }
7332
7333
7334 func (p pMask) clear(id int32) {
7335 word := id / 32
7336 mask := uint32(1) << (id % 32)
7337 atomic.And(&p[word], ^mask)
7338 }
7339
7340
7341 func (p pMask) any() bool {
7342 for i := range p {
7343 if atomic.Load(&p[i]) != 0 {
7344 return true
7345 }
7346 }
7347 return false
7348 }
7349
7350
7351
7352
7353
7354 func (p pMask) resize(nprocs int32) pMask {
7355 maskWords := (nprocs + 31) / 32
7356
7357 if maskWords <= int32(cap(p)) {
7358 return p[:maskWords]
7359 }
7360 newMask := make([]uint32, maskWords)
7361
7362 copy(newMask, p)
7363 return newMask
7364 }
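// Worked example of the word/bit arithmetic above (illustrative id): for
// id = 40, word = 40/32 = 1 and mask = 1<<(40%32) = 1<<8, so read, set and
// clear all operate atomically on bit 8 of p[1].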
7365
7366 // pidleput puts p on the _Pidle list. now must be a relatively recent call
7367 // to nanotime or zero. Returns now or the current time if now was zero.
7368 //
7369 // This releases ownership of p. Once sched.lock is released it is no longer
7370 // safe to use p.
7371 //
7372 // sched.lock must be held.
7373 //
7374 // May run during STW, so write barriers are not allowed.
7375 //
7376 //go:nowritebarrierrec
7377 func pidleput(pp *p, now int64) int64 {
7378 assertLockHeld(&sched.lock)
7379
7380 if !runqempty(pp) {
7381 throw("pidleput: P has non-empty run queue")
7382 }
7383 if now == 0 {
7384 now = nanotime()
7385 }
7386 if pp.timers.len.Load() == 0 {
7387 timerpMask.clear(pp.id)
7388 }
7389 idlepMask.set(pp.id)
7390 pp.link = sched.pidle
7391 sched.pidle.set(pp)
7392 sched.npidle.Add(1)
7393 if !pp.limiterEvent.start(limiterEventIdle, now) {
7394 throw("must be able to track idle limiter event")
7395 }
7396 return now
7397 }
7398
7399
7400
7401
7402
7403
7404
7405
7406 func pidleget(now int64) (*p, int64) {
7407 assertLockHeld(&sched.lock)
7408
7409 pp := sched.pidle.ptr()
7410 if pp != nil {
7411
7412 if now == 0 {
7413 now = nanotime()
7414 }
7415 timerpMask.set(pp.id)
7416 idlepMask.clear(pp.id)
7417 sched.pidle = pp.link
7418 sched.npidle.Add(-1)
7419 pp.limiterEvent.stop(limiterEventIdle, now)
7420 }
7421 return pp, now
7422 }
7423
7424
7425
7426
7427
7428
7429
7430
7431
7432
7433
7434 func pidlegetSpinning(now int64) (*p, int64) {
7435 assertLockHeld(&sched.lock)
7436
7437 pp, now := pidleget(now)
7438 if pp == nil {
7439
7440
7441
7442 sched.needspinning.Store(1)
7443 return nil, now
7444 }
7445
7446 return pp, now
7447 }
7448
7449 // runqempty reports whether pp has no Gs on its local run queue.
7450 // It never returns true spuriously.
7451 func runqempty(pp *p) bool {
7452 // Defend against a race where 1) pp has G1 in runqnext but runqhead == runqtail,
7453 // 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runqnext.
7454 // Simply observing that runqhead == runqtail and then observing that runqnext == nil
7455 // does not mean the queue is empty.
7456 for {
7457 head := atomic.Load(&pp.runqhead)
7458 tail := atomic.Load(&pp.runqtail)
7459 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7460 if tail == atomic.Load(&pp.runqtail) {
7461 return head == tail && runnext == 0
7462 }
7463 }
7464 }
7465
7466 // To shake out latent assumptions about scheduling order,
7467 // we introduce some randomness into scheduling decisions
7468 // when running with the race detector.
7469 // The need for this was made obvious by changing the
7470 // (deterministic) scheduling order in Go 1.5 and breaking
7471 // many poorly-written tests.
7472 // With the randomness here, as long as the tests pass
7473 // consistently with -race, they shouldn't have latent scheduling
7474 // assumptions.
7475 const randomizeScheduler = raceenabled
7476
7477 // runqput tries to put g on the local runnable queue.
7478 // If next is false, runqput adds g to the tail of the runnable queue.
7479 // If next is true, runqput puts g in the pp.runnext slot.
7480 // If the run queue is full, runqput puts g on the global queue.
7481 // Executed only by the owner P.
7482 func runqput(pp *p, gp *g, next bool) {
7483 if !haveSysmon && next {
7484
7485
7486
7487
7488
7489
7490
7491
7492 next = false
7493 }
7494 if randomizeScheduler && next && randn(2) == 0 {
7495 next = false
7496 }
7497
7498 if next {
7499 retryNext:
7500 oldnext := pp.runnext
7501 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7502 goto retryNext
7503 }
7504 if oldnext == 0 {
7505 return
7506 }
7507
7508 gp = oldnext.ptr()
7509 }
7510
7511 retry:
7512 h := atomic.LoadAcq(&pp.runqhead)
7513 t := pp.runqtail
7514 if t-h < uint32(len(pp.runq)) {
7515 pp.runq[t%uint32(len(pp.runq))].set(gp)
7516 atomic.StoreRel(&pp.runqtail, t+1)
7517 return
7518 }
7519 if runqputslow(pp, gp, h, t) {
7520 return
7521 }
7522
7523 goto retry
7524 }
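// Standalone sketch (mirror types invented for illustration, not the
// runtime's): the local run queue is a fixed-size ring indexed modulo its
// length, with head and tail counters that only increase, plus the single
// runnext slot that runqput fills first:
//
//	type localRunq struct {
//		head, tail uint32
//		buf        [256]int // stands in for [256]guintptr
//	}
//
//	// put appends v at the tail; a full ring is the case where runqput above
//	// falls back to runqputslow and moves half the queue to the global queue.
//	func (r *localRunq) put(v int) bool {
//		if r.tail-r.head == uint32(len(r.buf)) {
//			return false
//		}
//		r.buf[r.tail%uint32(len(r.buf))] = v
//		r.tail++
//		return true
//	}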
7525
7526 // runqputslow puts gp and a batch of work from the local runnable queue on the global queue.
7527 // Executed only by the owner P.
7528 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7529 var batch [len(pp.runq)/2 + 1]*g
7530
7531
7532 n := t - h
7533 n = n / 2
7534 if n != uint32(len(pp.runq)/2) {
7535 throw("runqputslow: queue is not full")
7536 }
7537 for i := uint32(0); i < n; i++ {
7538 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7539 }
7540 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7541 return false
7542 }
7543 batch[n] = gp
7544
7545 if randomizeScheduler {
7546 for i := uint32(1); i <= n; i++ {
7547 j := cheaprandn(i + 1)
7548 batch[i], batch[j] = batch[j], batch[i]
7549 }
7550 }
7551
7552
7553 for i := uint32(0); i < n; i++ {
7554 batch[i].schedlink.set(batch[i+1])
7555 }
7556
7557 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7558
7559
7560 lock(&sched.lock)
7561 globrunqputbatch(&q)
7562 unlock(&sched.lock)
7563 return true
7564 }
7565
7566
7567
7568
7569 func runqputbatch(pp *p, q *gQueue) {
7570 if q.empty() {
7571 return
7572 }
7573 h := atomic.LoadAcq(&pp.runqhead)
7574 t := pp.runqtail
7575 n := uint32(0)
7576 for !q.empty() && t-h < uint32(len(pp.runq)) {
7577 gp := q.pop()
7578 pp.runq[t%uint32(len(pp.runq))].set(gp)
7579 t++
7580 n++
7581 }
7582
7583 if randomizeScheduler {
7584 off := func(o uint32) uint32 {
7585 return (pp.runqtail + o) % uint32(len(pp.runq))
7586 }
7587 for i := uint32(1); i < n; i++ {
7588 j := cheaprandn(i + 1)
7589 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7590 }
7591 }
7592
7593 atomic.StoreRel(&pp.runqtail, t)
7594
7595 return
7596 }
7597
7598 // runqget gets a g from the local runnable queue.
7599 // If inheritTime is true, gp should inherit the remaining time in the
7600 // current time slice. Otherwise, it should start a new time slice.
7601 // Executed only by the owner P.
7602 func runqget(pp *p) (gp *g, inheritTime bool) {
7603
7604 next := pp.runnext
7605
7606
7607
7608 if next != 0 && pp.runnext.cas(next, 0) {
7609 return next.ptr(), true
7610 }
7611
7612 for {
7613 h := atomic.LoadAcq(&pp.runqhead)
7614 t := pp.runqtail
7615 if t == h {
7616 return nil, false
7617 }
7618 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7619 if atomic.CasRel(&pp.runqhead, h, h+1) {
7620 return gp, false
7621 }
7622 }
7623 }
7624
7625
7626
7627 func runqdrain(pp *p) (drainQ gQueue) {
7628 oldNext := pp.runnext
7629 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7630 drainQ.pushBack(oldNext.ptr())
7631 }
7632
7633 retry:
7634 h := atomic.LoadAcq(&pp.runqhead)
7635 t := pp.runqtail
7636 qn := t - h
7637 if qn == 0 {
7638 return
7639 }
7640 if qn > uint32(len(pp.runq)) {
7641 goto retry
7642 }
7643
7644 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7645 goto retry
7646 }
7647
7648
7649
7650
7651
7652
7653
7654
7655 for i := uint32(0); i < qn; i++ {
7656 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7657 drainQ.pushBack(gp)
7658 }
7659 return
7660 }
7661
7662 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
7663 // batch is a ring buffer starting at batchHead.
7664 // Returns the number of grabbed goroutines.
7665 // Can be executed by any P.
7666 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7667 for {
7668 h := atomic.LoadAcq(&pp.runqhead)
7669 t := atomic.LoadAcq(&pp.runqtail)
7670 n := t - h
7671 n = n - n/2
7672 if n == 0 {
7673 if stealRunNextG {
7674
7675 if next := pp.runnext; next != 0 {
7676 if pp.status == _Prunning {
7677 if mp := pp.m.ptr(); mp != nil {
7678 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
7679
7680
7681
7682
7683
7684
7685
7686
7687
7688
7689
7690
7691
7692
7693
7694
7695
7696
7697
7698 if !osHasLowResTimer {
7699 usleep(3)
7700 } else {
7701
7702
7703
7704 osyield()
7705 }
7706 }
7707 }
7708 }
7709 if !pp.runnext.cas(next, 0) {
7710 continue
7711 }
7712 batch[batchHead%uint32(len(batch))] = next
7713 return 1
7714 }
7715 }
7716 return 0
7717 }
7718 if n > uint32(len(pp.runq)/2) {
7719 continue
7720 }
7721 for i := uint32(0); i < n; i++ {
7722 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7723 batch[(batchHead+i)%uint32(len(batch))] = g
7724 }
7725 if atomic.CasRel(&pp.runqhead, h, h+n) {
7726 return n
7727 }
7728 }
7729 }
7730
7731 // runqsteal steals half of the elements from the local runnable queue of p2
7732 // and puts them onto the local runnable queue of pp.
7733 // Returns one of the stolen elements (or nil if it failed).
7734 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7735 t := pp.runqtail
7736 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7737 if n == 0 {
7738 return nil
7739 }
7740 n--
7741 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7742 if n == 0 {
7743 return gp
7744 }
7745 h := atomic.LoadAcq(&pp.runqhead)
7746 if t-h+n >= uint32(len(pp.runq)) {
7747 throw("runqsteal: runq overflow")
7748 }
7749 atomic.StoreRel(&pp.runqtail, t+n)
7750 return gp
7751 }
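// Worked example of the steal amount (illustrative numbers): with 7 runnable
// goroutines on the victim, runqgrab computes n = 7 - 7/2 = 4, i.e. the thief
// takes the larger half. runqsteal then returns one of those 4 directly and
// publishes the remaining 3 on its own queue by advancing runqtail.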
7752
7753 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only be
7754 // on one gQueue or gList at a time.
7755 type gQueue struct {
7756 head guintptr
7757 tail guintptr
7758 size int32
7759 }
7760
7761
7762 func (q *gQueue) empty() bool {
7763 return q.head == 0
7764 }
7765
7766
7767 func (q *gQueue) push(gp *g) {
7768 gp.schedlink = q.head
7769 q.head.set(gp)
7770 if q.tail == 0 {
7771 q.tail.set(gp)
7772 }
7773 q.size++
7774 }
7775
7776
7777 func (q *gQueue) pushBack(gp *g) {
7778 gp.schedlink = 0
7779 if q.tail != 0 {
7780 q.tail.ptr().schedlink.set(gp)
7781 } else {
7782 q.head.set(gp)
7783 }
7784 q.tail.set(gp)
7785 q.size++
7786 }
7787
7788
7789
7790 func (q *gQueue) pushBackAll(q2 gQueue) {
7791 if q2.tail == 0 {
7792 return
7793 }
7794 q2.tail.ptr().schedlink = 0
7795 if q.tail != 0 {
7796 q.tail.ptr().schedlink = q2.head
7797 } else {
7798 q.head = q2.head
7799 }
7800 q.tail = q2.tail
7801 q.size += q2.size
7802 }
7803
7804
7805
7806 func (q *gQueue) pop() *g {
7807 gp := q.head.ptr()
7808 if gp != nil {
7809 q.head = gp.schedlink
7810 if q.head == 0 {
7811 q.tail = 0
7812 }
7813 q.size--
7814 }
7815 return gp
7816 }
7817
7818
7819 func (q *gQueue) popList() gList {
7820 stack := gList{q.head, q.size}
7821 *q = gQueue{}
7822 return stack
7823 }
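// Standalone sketch (mirror types invented for illustration): used via
// pushBack and pop, gQueue behaves as an intrusively linked FIFO with an O(1)
// size counter, which is what the scheduler's global run queue relies on:
//
//	type node struct{ next *node }
//
//	type fifo struct {
//		head, tail *node
//		size       int
//	}
//
//	func (q *fifo) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//		q.size++
//	}
//
//	func (q *fifo) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//			q.size--
//		}
//		return n
//	}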
7824
7825 // A gList is a list of Gs linked through g.schedlink. A G can only be on
7826 // one gQueue or gList at a time.
7827 type gList struct {
7828 head guintptr
7829 size int32
7830 }
7831
7832
7833 func (l *gList) empty() bool {
7834 return l.head == 0
7835 }
7836
7837
7838 func (l *gList) push(gp *g) {
7839 gp.schedlink = l.head
7840 l.head.set(gp)
7841 l.size++
7842 }
7843
7844
7845 func (l *gList) pushAll(q gQueue) {
7846 if !q.empty() {
7847 q.tail.ptr().schedlink = l.head
7848 l.head = q.head
7849 l.size += q.size
7850 }
7851 }
7852
7853
7854 func (l *gList) pop() *g {
7855 gp := l.head.ptr()
7856 if gp != nil {
7857 l.head = gp.schedlink
7858 l.size--
7859 }
7860 return gp
7861 }
7862
7863
7864 func setMaxThreads(in int) (out int) {
7865 lock(&sched.lock)
7866 out = int(sched.maxmcount)
7867 if in > 0x7fffffff {
7868 sched.maxmcount = 0x7fffffff
7869 } else {
7870 sched.maxmcount = int32(in)
7871 }
7872 checkmcount()
7873 unlock(&sched.lock)
7874 return
7875 }
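// setMaxThreads backs runtime/debug.SetMaxThreads. Typical (illustrative) use
// from user code:
//
//	import "runtime/debug"
//
//	prev := debug.SetMaxThreads(20000) // raise the thread limit, get back the old one
//	_ = prev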
7876
7877
7878
7879
7880
7881
7882
7883
7884
7885
7886
7887
7888
7889 func procPin() int {
7890 gp := getg()
7891 mp := gp.m
7892
7893 mp.locks++
7894 return int(mp.p.ptr().id)
7895 }
7896
7897
7898
7899
7900
7901
7902
7903
7904
7905
7906
7907
7908
7909 func procUnpin() {
7910 gp := getg()
7911 gp.m.locks--
7912 }
7913
7914
7915
7916 func sync_runtime_procPin() int {
7917 return procPin()
7918 }
7919
7920
7921
7922 func sync_runtime_procUnpin() {
7923 procUnpin()
7924 }
7925
7926
7927
7928 func sync_atomic_runtime_procPin() int {
7929 return procPin()
7930 }
7931
7932
7933
7934 func sync_atomic_runtime_procUnpin() {
7935 procUnpin()
7936 }
7937
7938
7939
7940
7941
7942 func internal_sync_runtime_canSpin(i int) bool {
7943
7944
7945
7946
7947
7948 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7949 return false
7950 }
7951 if p := getg().m.p.ptr(); !runqempty(p) {
7952 return false
7953 }
7954 return true
7955 }
7956
7957
7958
7959 func internal_sync_runtime_doSpin() {
7960 procyield(active_spin_cnt)
7961 }
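// Worked example of the canSpin condition (illustrative numbers): with
// gomaxprocs = 8, npidle = 5 and nmspinning = 2, the test 8 <= 5+2+1 holds,
// so spinning would be wasted (almost nothing else is running user code) and
// internal_sync_runtime_canSpin reports false; callers such as sync.Mutex then
// park on a semaphore instead of busy-waiting in internal_sync_runtime_doSpin.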
7962
7963
7964
7965
7966
7967
7968
7969
7970
7971
7972
7973
7974
7975
7976
7977 func sync_runtime_canSpin(i int) bool {
7978 return internal_sync_runtime_canSpin(i)
7979 }
7980
7981
7982
7983
7984
7985
7986
7987
7988
7989
7990
7991
7992
7993 func sync_runtime_doSpin() {
7994 internal_sync_runtime_doSpin()
7995 }
7996
7997 var stealOrder randomOrder
7998
7999
8000
8001
8002
8003 type randomOrder struct {
8004 count uint32
8005 coprimes []uint32
8006 }
8007
8008 type randomEnum struct {
8009 i uint32
8010 count uint32
8011 pos uint32
8012 inc uint32
8013 }
8014
8015 func (ord *randomOrder) reset(count uint32) {
8016 ord.count = count
8017 ord.coprimes = ord.coprimes[:0]
8018 for i := uint32(1); i <= count; i++ {
8019 if gcd(i, count) == 1 {
8020 ord.coprimes = append(ord.coprimes, i)
8021 }
8022 }
8023 }
8024
8025 func (ord *randomOrder) start(i uint32) randomEnum {
8026 return randomEnum{
8027 count: ord.count,
8028 pos: i % ord.count,
8029 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
8030 }
8031 }
8032
8033 func (enum *randomEnum) done() bool {
8034 return enum.i == enum.count
8035 }
8036
8037 func (enum *randomEnum) next() {
8038 enum.i++
8039 enum.pos = (enum.pos + enum.inc) % enum.count
8040 }
8041
8042 func (enum *randomEnum) position() uint32 {
8043 return enum.pos
8044 }
8045
8046 func gcd(a, b uint32) uint32 {
8047 for b != 0 {
8048 a, b = b, a%b
8049 }
8050 return a
8051 }
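// Worked example (illustrative): for count = 6 the coprimes are {1, 5}. An
// enumeration started with i = 3 begins at pos = 3%6 = 3 and steps by
// inc = coprimes[(3/6)%2] = coprimes[0] = 1, visiting positions 3, 4, 5, 0, 1, 2.
// Because the increment is coprime with count, every P is visited exactly once,
// in an order that varies with i.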
8052
8053
8054
8055 type initTask struct {
8056 state uint32
8057 nfns uint32
8058
8059 }
8060
8061
8062
8063 var inittrace tracestat
8064
8065 type tracestat struct {
8066 active bool
8067 id uint64
8068 allocs uint64
8069 bytes uint64
8070 }
8071
8072 func doInit(ts []*initTask) {
8073 for _, t := range ts {
8074 doInit1(t)
8075 }
8076 }
8077
8078 func doInit1(t *initTask) {
8079 switch t.state {
8080 case 2:
8081 return
8082 case 1:
8083 throw("recursive call during initialization - linker skew")
8084 default:
8085 t.state = 1
8086
8087 var (
8088 start int64
8089 before tracestat
8090 )
8091
8092 if inittrace.active {
8093 start = nanotime()
8094
8095 before = inittrace
8096 }
8097
8098 if t.nfns == 0 {
8099
8100 throw("inittask with no functions")
8101 }
8102
8103 firstFunc := add(unsafe.Pointer(t), 8)
8104 for i := uint32(0); i < t.nfns; i++ {
8105 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
8106 f := *(*func())(unsafe.Pointer(&p))
8107 f()
8108 }
8109
8110 if inittrace.active {
8111 end := nanotime()
8112
8113 after := inittrace
8114
8115 f := *(*func())(unsafe.Pointer(&firstFunc))
8116 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
8117
8118 var sbuf [24]byte
8119 print("init ", pkg, " @")
8120 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
8121 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
8122 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
8123 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
8124 print("\n")
8125 }
8126
8127 t.state = 2
8128 }
8129 }
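// Example (values invented; format taken from the prints above): with
// GODEBUG=inittrace=1 the runtime emits one line per init task as it runs,
// e.g.:
//
//	init internal/bytealg @0.008 ms, 0 ms clock, 0 bytes, 0 allocs
//	init runtime @0.059 ms, 0.026 ms clock, 0 bytes, 0 allocs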
8130