Source file
src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/exithook"
15 "internal/runtime/maps"
16 "internal/runtime/sys"
17 "internal/strconv"
18 "internal/stringslite"
19 "unsafe"
20 )
21
22
// modinfo holds the module info string. It is not assigned in this file;
// presumably it is populated by the linker/toolchain — confirm elsewhere.
// schedinit clears it when it is a single placeholder byte.
var modinfo string
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
// Statically allocated bootstrap state: the first M and G exist before the
// allocator is usable.
var (
	m0           m       // the bootstrap M (checked against in main/mstart1)
	g0           g       // m0's scheduling g
	mcache0      *mcache // early mcache — not set in this chunk; confirm at use sites
	raceprocctx0 uintptr // race detector context for the bootstrap proc (set in schedinit)
	raceFiniLock mutex   // serializes race detector finalization
)
126
127
128
// runtime_inittasks holds the runtime package's own init tasks; main runs
// them via doInit before user package inits.
var runtime_inittasks []*initTask

// mainInitDone is set once all init tasks (runtime and modules) have run.
var mainInitDone atomic.Bool

// mainInitDoneChan is allocated in main and closed when init is complete.
var mainInitDoneChan chan bool

// main_main has no Go body here; presumably it is wired to main.main by
// the linker — confirm via linkname/asm elsewhere.
func main_main()

// mainStarted reports that the main goroutine has begun running; set in
// main before sysmon is started.
var mainStarted bool

// runtimeInitTime is the nanotime() captured in main before doInit runs.
var runtimeInitTime int64

// initSigmask is the signal mask of the bootstrap thread, saved in
// schedinit after sigsave.
var initSigmask sigset
151
152
// main is the main goroutine (runtime.main). It finishes runtime setup,
// runs all init tasks, calls the program's main function, and exits the
// process.
func main() {
	mp := getg().m

	// m0->g0's racectx is used only as the parent of the main goroutine.
	mp.g0.racectx = 0

	// Stack size limits: 1 GB on 64-bit, 250 MB on 32-bit.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// An upper bound that stays above any value maxstacksize may be set to.
	maxstackceiling = 2 * maxstacksize

	// Record that the main goroutine is running.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread, during
	// initialization. Init work may need to run on the main thread;
	// the deferred unlock below undoes this once init is done.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the runtime started running, before any init task.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	// Run the runtime package's own inits first.
	doInit(runtime_inittasks)

	// needUnlock lets the defer skip the unlock once it has been done
	// explicitly on the success path below.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()
	defaultGOMAXPROCSUpdateEnable()

	mainInitDoneChan = make(chan bool)
	if iscgo {
		// Sanity-check that all required cgo support pointers were wired up.
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if GOOS != "windows" {
			if _cgo_thread_start == nil {
				throw("_cgo_thread_start missing")
			}
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		// Point the C-side crosscall2 function pointer at its target.
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread so new threads can be created for
		// C-created threads entering Go, then tell C that init is done.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the init tasks of every loaded module. Snapshot the tail first
	// so modules added concurrently (e.g. by plugins) are not iterated.
	last := lastmoduledatap
	for m := &firstmoduledata; true; m = m.next {
		doInit(m.inittasks)
		if m == last {
			break
		}
	}

	// Init is done; stop tracing it.
	inittrace.active = false

	mainInitDone.Store(true)
	close(mainInitDoneChan)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// In -buildmode=c-archive/c-shared the main function is not run.
		if GOARCH == "wasm" {
			// On wasm, return control to the host instead of returning.
			// The offset compensates for frame layout — confirm in asm.
			pause(sys.GetCallerSP() - 16)
			panic("unreachable")
		}
		return
	}
	fn := main_main // indirect call: the linker fills in main_main's target
	fn()

	// With ASAN, run exit hooks and the leak check early if the process
	// may not get another chance (archive/library or active cgo).
	exitHooksRun := false
	if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		runExitHooks(0)
		exitHooksRun = true
		lsandoleakcheck()
	}

	// Give outstanding panics with deferred functions a moment to finish.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		// Another goroutine is panicking: park forever and let it crash
		// the process.
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	if !exitHooksRun {
		runExitHooks(0)
	}
	if raceenabled {
		racefini()
	}

	exit(0)
	// Unreachable unless exit fails; fault loudly rather than run on.
	for {
		var x *int32
		*x = 0
	}
}
340
341
342
343
// os_beforeExit runs process-exit bookkeeping (exit hooks, race/asan
// finalization). The name suggests it is linknamed from package os as its
// pre-exit hook — confirm the linkname elsewhere.
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}

	// On a clean exit under ASAN, run the leak check when the process is a
	// library/archive or has active cgo calls.
	if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		lsandoleakcheck()
	}
}
355
// init wires runtime primitives into internal/runtime/exithook so exit
// hooks can yield, identify their goroutine, and throw.
func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}
361
// runExitHooks runs the registered exit hooks with the process exit code.
func runExitHooks(code int) {
	exithook.Run(code)
}
365
366
// init starts the goroutine that services periodic forced GCs.
func init() {
	go forcegchelper()
}
370
// forcegchelper parks on forcegc.lock and, each time it is woken
// (presumably by sysmon via forcegc — confirm at the wake site), starts a
// time-triggered GC cycle.
func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		// Mark ourselves idle, then sleep until woken.
		forcegc.idle.Store(true)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)

		if debug.gctrace > 0 {
			println("GC forced")
		}

		// Time-triggered GC cycle.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}
389
390
391
392
393
// Gosched yields the processor, allowing other goroutines to run. It does
// not suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
398
399
400
401
402
// goschedguarded yields the processor like Gosched; the "guarded" checks
// live in goschedguarded_m (not visible in this chunk).
func goschedguarded() {
	mcall(goschedguarded_m)
}
406
407
408
409
410
411
// goschedIfBusy yields the processor like Gosched, but skips the yield
// when idle Ps exist (the scheduler is not starved), unless a preemption
// request is pending on this goroutine.
func goschedIfBusy() {
	gp := getg()

	// Honor gp.preempt even when there are idle Ps.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
// gopark puts the current goroutine into a waiting state and, on the g0
// stack (via park_m), calls unlockf(gp, lock); if unlockf returns false
// the goroutine is resumed. reason labels the wait for tracebacks;
// traceReason/traceskip feed the execution tracer.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts()
	}
	// Pin the M while we stash the park parameters on it.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// park_m performs the actual status change and unlockf call on g0.
	mcall(park_m)
}
469
470
471
// goparkunlock parks the current goroutine and unlocks lock (via
// parkunlock_c, run on the system stack by gopark).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
475
476
477
478
479
480
481
482
483
484
485
// goready marks gp runnable on the system stack, placing it in the current
// P's run queue with the "run next" preference.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
491
492
// acquireSudog returns a sudog from the per-P cache, refilling the cache
// to half capacity from the central list (sched.sudogcache) or allocating
// a fresh one when both are empty.
func acquireSudog() *sudog {
	// Pin the M across the possible allocation below: new(sudog) can
	// trigger GC-related machinery, and we must not migrate to another P
	// while manipulating pp.sudogcache.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from the central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop from the tail, clearing the slot so the cache holds no stale ref.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem.get() != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}
529
530
// releaseSudog returns s to the per-P cache. s must be fully disconnected:
// nil elem/next/prev/waitlink/c and not part of a select. If the local
// cache is full, half of it is first transferred to the central list.
func releaseSudog(s *sudog) {
	if s.elem.get() != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c.get() != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	// Pin the M so the P cannot change while we touch its cache.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of the local cache to the central cache, built as
		// a linked list from the popped entries.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
579
580
// badmcall reports a fatal misuse of mcall from the m->g0 stack.
// The fn parameter mirrors mcall's signature; it is unused here.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

// badmcall2 reports the fatal condition of an mcall'd function returning.
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

// badreflectcall panics when a reflect.call argument frame exceeds 1GB.
func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}
592
593
594
// badmorestackg0 reports a stack overflow on g0. Where a crash stack is
// available it switches to it to print a full traceback before throwing;
// otherwise it can only write a fixed message to stderr.
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include runtime frames in the traceback
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}
611
612
613
// badmorestackgsignal reports a stack overflow on the signal-handling g.
// Only a direct stderr write is safe here.
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}

// badctxt throws when a context register expected to be zero is not
// (reported from stack-growth paths — confirm callers in asm).
func badctxt() {
	throw("ctxt != 0")
}
622
623
624
// gcrash is a pre-allocated g whose stack (carved out in schedinit) is
// used when crashing due to a bad stack, e.g. morestack on g0.
var gcrash g

// crashingG records which g currently owns the crash stack, so recursive
// and concurrent crashes can be detected in switchToCrashStack.
var crashingG atomic.Pointer[g]
628
629
630
631
632
633
634
635
636
// switchToCrashStack switches to the dedicated crash stack and runs fn;
// it never returns. Only one g may crash at a time: a recursive call on
// the same g or a concurrent call from another g reports and aborts.
func switchToCrashStack(fn func()) {
	me := getg()
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// Crashed again while already on the crash stack.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another g owns the crash stack; give it a moment, then give up.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}
653
654
655
656
// crashStackImplemented reports whether this platform supports switching
// to the separate crash stack (everywhere except Windows in this build).
const crashStackImplemented = GOOS != "windows"

// switchToCrashStack0 has no Go body; presumably implemented in assembly
// to swap SP onto gcrash's stack and call fn.
func switchToCrashStack0(fn func())
661
662 func lockedOSThread() bool {
663 gp := getg()
664 return gp.lockedm != 0 && gp.m.lockedg != 0
665 }
666
var (
	// allglock protects allgs and the publication of allglen/allgptr.
	allglock mutex
	// allgs is the list of all gs ever created; it only grows.
	allgs []*g

	// allglen and allgptr are an atomically published snapshot of
	// len(allgs) and &allgs[0], enabling lock-free readers (atomicAllG).
	// allgadd publishes the new base pointer before the new length, and
	// readers load the length before the pointer, so a reader never sees
	// a length larger than the array it holds.
	allglen uintptr
	allgptr **g
)
692
// allgadd appends gp to allgs and republishes the lock-free snapshot
// (allgptr, then allglen) used by atomicAllG.
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	if &allgs[0] != allgptr {
		// The backing array moved: publish the new base pointer before
		// the new length so concurrent readers stay in bounds.
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}
706
707
708
709
// allGsSnapshot returns a snapshot of allgs. The world must be stopped or
// allglock held.
//
// The result is three-index sliced so later appends by allgadd cannot
// write into it; since allgs only grows, the snapshot merely misses gs
// added afterwards.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	return allgs[:len(allgs):len(allgs)]
}
720
721
// atomicAllG returns &allgs[0] and len(allgs) without taking allglock.
// The length is loaded before the pointer, matching allgadd's publication
// order (pointer first, length second).
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}

// atomicAllGIndex returns ptr[i], where ptr is a base pointer obtained
// from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
732
733
734
735
// forEachG calls fn on every g, holding allglock for the whole iteration
// so the set cannot change underneath fn.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}
743
744
745
746
747
748 func forEachGRace(fn func(gp *g)) {
749 ptr, length := atomicAllG()
750 for i := uintptr(0); i < length; i++ {
751 gp := atomicAllGIndex(ptr, i)
752 fn(gp)
753 }
754 return
755 }
756
const (
	// _GoidCacheBatch is the number of goroutine IDs grabbed from the
	// central generator into a local cache at a time — confirm at the
	// goid allocation site (not in this chunk).
	_GoidCacheBatch = 16
)
762
763
764
// cpuinit initializes CPU feature detection (internal/cpu) and copies the
// feature flags consulted by the runtime and compiler-generated guards
// into package-level variables. env carries GODEBUG for feature overrides.
func cpuinit(env string) {
	cpu.Initialize(env)

	// Copy per-architecture feature flags into runtime globals so
	// generated code can guard optional instructions.
	switch GOARCH {
	case "386", "amd64":
		x86HasAVX = cpu.X86.HasAVX
		x86HasFMA = cpu.X86.HasFMA
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS

	case "loong64":
		loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
		loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
		loong64HasDBAR_HINTS = cpu.Loong64.HasDBAR_HINTS
		loong64HasLSX = cpu.Loong64.HasLSX

	case "riscv64":
		riscv64HasZbb = cpu.RISCV64.HasZbb
	}
}
793
794
795
796
797
798
// getGodebugEarly extracts $GODEBUG before the normal environment setup
// (goenvs) has run, by scanning the OS-provided environ block that follows
// argv. Returns ok=false on platforms where this early path isn't
// available; callers then parse GODEBUG later via gogetenv.
func getGodebugEarly() (string, bool) {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// On these platforms the environment strings sit directly after
		// argv (argv[argc+1:]). Count the entries, then scan for the
		// GODEBUG= prefix.
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))

			if stringslite.HasPrefix(s, prefix) {
				env = gostringnocopy(p)[len(prefix):]
				break
			}
		}
		break

	default:
		return "", false
	}
	return env, true
}
828
829
830
831
832
833
834
835
836
// schedinit performs one-time scheduler and runtime initialization on the
// bootstrap thread (g0 of m0): lock ranks, race detector, memory
// allocator, module/type metadata, environment, GC, and finally the
// initial set of Ps via procresize.
func schedinit() {
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()

	// This lock must always be a leaf in the lock-rank graph: its critical
	// sections are expected to be extremely short.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	lockVerifyMSize()

	sched.midle.init(unsafe.Offsetof(m{}.idleNode))

	// raceinit must be the first race detector call; in particular it must
	// happen before mallocinit below.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0))

	// The world starts stopped.
	worldStopped()

	// Parse GODEBUG as early as possible on platforms that allow it.
	godebug, parsedGodebug := getGodebugEarly()
	if parsedGodebug {
		parseRuntimeDebugVars(godebug)
	}
	ticks.init()
	moduledataverify()
	stackinit()
	randinit()
	mallocinit()
	cpuinit(godebug) // feature flags, with GODEBUG overrides
	maps.AlgInit()
	mcommoninit(gp.m, -1)
	modulesinit()
	typelinksinit()
	itabsinit()
	stkobjinit()

	// Save the bootstrap thread's signal mask for new Ms.
	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	checkfds()
	if !parsedGodebug {
		// Platforms without the early path parse GODEBUG now that the
		// environment is available.
		parseRuntimeDebugVars(gogetenv("GODEBUG"))
	}
	finishDebugVarsSetup()
	gcinit()

	// Allocate stack space usable when crashing due to bad stack
	// conditions, e.g. morestack on g0. The guards are set low into the
	// stack so nearly all of it is usable for the crash dump.
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000

	// If memory profiling is compiled out, force the rate to zero.
	// (Debug-var parsing above may have set MemProfileRate; this wins.)
	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	// mcommoninit ran before debug vars were final; redo the profile
	// stacks with the now-known profstackdepth.
	mProfStackInit(gp.m)
	defaultGOMAXPROCSInit()

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	var procs int32
	if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
		procs = int32(n)
		sched.customGOMAXPROCS = true
	} else {
		// No valid GOMAXPROCS in the environment: compute the default
		// from the startup CPU count.
		procs = defaultGOMAXPROCS(numCPUStartup)
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started now, as Ps can run.
	worldStarted()

	if buildVersion == "" {
		// Should never trigger: this assignment only keeps buildVersion
		// alive in the binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Should never trigger: this assignment only keeps modinfo
		// alive in the binary.
		modinfo = ""
	}
}
961
962 func dumpgstatus(gp *g) {
963 thisg := getg()
964 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
965 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
966 }
967
968
// checkmcount throws if the number of threads counted against the limit
// exceeds sched.maxmcount. sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude the extra Ms (threads entering Go from C) from the count:
	// both those in use and those on the free list are subtracted, so the
	// limit applies to Go-created threads only.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}
986
987
988
989
990
// mReserveID returns the next ID for a new M and advances sched.mnext,
// checking the thread limit. sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}
1002
1003
// mcommoninit performs platform-independent initialization common to all
// Ms: ID assignment (id < 0 reserves a fresh one), random state, signal g
// setup, and publication on the allm list.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stacks aren't meaningful creation stacks; only record callers
	// when called from a regular g.
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mp.self = newMWeakPointer(mp)

	mrandinit(mp)

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Link into allm while holding sched.lock...
	mp.alllink = allm

	// ...but publish the head atomically: readers such as NumCgoCall walk
	// allm without sched.lock.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Pre-allocate the buffer for cgo crash tracebacks where cgo or
	// libcall-style syscalls are possible.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
	mProfStackInit(mp)
}
1044
1045
1046
1047
1048
// mProfStackInit allocates the per-M buffers used to record profiling call
// stacks. A profstackdepth of 0 disables stack recording entirely.
func mProfStackInit(mp *m) {
	if debug.profstackdepth == 0 {
		// Profiling stacks explicitly disabled (or debug vars not yet
		// parsed — schedinit re-runs this for m0 afterwards).
		return
	}
	mp.profStack = makeProfStackFP()
	mp.mLockProfile.stack = makeProfStackFP()
}
1058
1059
1060
1061
// makeProfStackFP allocates a profiling stack buffer with headroom beyond
// the user-visible depth: one extra slot plus maxSkip slots so frames that
// will be skipped can still be recorded and dropped later.
func makeProfStackFP() []uintptr {
	return make([]uintptr, 1+maxSkip+debug.profstackdepth)
}
1071
1072
1073
// makeProfStack returns a buffer large enough to hold a profiling call stack.
func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1075
1076
// pprof_makeProfStack exposes makeProfStack, presumably via linkname for
// runtime/pprof — confirm the directive elsewhere.
func pprof_makeProfStack() []uintptr { return makeProfStack() }
1078
// becomeSpinning marks mp as spinning (actively looking for work),
// bumping the global spinning count and clearing the needspinning request.
func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}
1084
1085
1086
1087
1088
1089
1090
1091
// snapshotAllp stashes the current allp slice on mp and returns it.
// Holding the reference on the M keeps the snapshot's backing array
// reachable for the duration of its use; pair with clearAllpSnapshot.
func (mp *m) snapshotAllp() []*p {
	mp.allpSnapshot = allp
	return mp.allpSnapshot
}

// clearAllpSnapshot releases the snapshot taken by snapshotAllp so its
// backing array can be collected.
func (mp *m) clearAllpSnapshot() {
	mp.allpSnapshot = nil
}
1106
1107 func (mp *m) hasCgoOnStack() bool {
1108 return mp.ncgo > 0 || mp.isextra
1109 }
1110
const (
	// osHasLowResTimer indicates that the platform's internal timer
	// mechanism has low resolution.
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is the integer form of osHasLowResClock
	// (goos.IsWindows: 1 on Windows, 0 elsewhere), usable in arithmetic.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock indicates that nanotime timestamps on this
	// platform have low resolution.
	osHasLowResClock = osHasLowResClockInt > 0
)
1124
1125
// ready marks gp (which must be in _Gwaiting, possibly with _Gscan set)
// runnable and puts it on the current P's run queue; next selects the
// "run next" slot. Wakes a P if one should start running.
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Disable preemption while we hold the P in a local variable.
	mp := acquirem()
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// Transition to runnable under the tracer's acquire/release so the
	// unpark event pairs with the status change.
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}
1147
1148
1149
// freezeStopWait is a large value for sched.stopwait used by
// freezetheworld so that no P believes it is the last one to stop.
const freezeStopWait = 0x7fffffff

// freezing is set when the runtime is freezing the world for a fatal
// crash; stopTheWorldWithSema consults it to deadlock quietly instead of
// reporting an inconsistent stop.
var freezing atomic.Bool
1155
1156
1157
1158
// freezetheworld performs a best-effort stop of everything during a fatal
// crash so tracebacks observe a quiescent world. Unlike stopTheWorld it
// makes no cleanliness guarantees — it may run from contexts where the
// scheduler state is inconsistent, so it only retries and sleeps.
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// GODEBUG=dontfreezetheworld: leave goroutines running so their
		// live state appears in the crash output; just sleep briefly to
		// let currently running goroutines reach a steadier point.
		usleep(1000)
		return
	}

	// stopwait and preemption requests can be lost to races with
	// concurrently executing threads, so retry a few times.
	for i := 0; i < 5; i++ {
		// Ask the scheduler not to start anything new...
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// ...and preempt everything that is running.
		if !preemptall() {
			break // nothing was running
		}
		usleep(1000)
	}

	// Belt and suspenders: one more preemption round.
	usleep(1000)
	preemptall()
	usleep(1000)
}
1208
1209
1210
1211
1212
// readgstatus atomically loads gp's status word. All reads of
// gp.atomicstatus should go through this helper.
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}
1216
1217
1218
1219
1220
// casfrom_Gscanstatus clears the _Gscan bit: it CASes gp's status from
// oldval (which must be a scan state) to newval (= oldval &^ _Gscan),
// throwing on an invalid transition or a failed CAS, and releases the
// Gscan lock rank the caller acquired when setting the bit.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Validate the transition before attempting the CAS.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanleaked,
		_Gscanpreempted,
		_Gscandeadextra:
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	releaseLockRankAndM(lockRankGscan)
}
1248
1249
1250
// castogscanstatus attempts to set the _Gscan bit: CAS from oldval to
// newval (= oldval | _Gscan). On success it acquires the Gscan lock rank
// (released later by casfrom_Gscanstatus) and returns true; a failed CAS
// returns false; an invalid oldval throws.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gleaked,
		_Gsyscall,
		_Gdeadextra:
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRankAndM(lockRankGscan)
			}
			return r
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("bad oldval passed to castogscanstatus")
	return false
}
1272
1273
1274
// casgstatusAlwaysTrack forces casgstatus to track scheduling latencies on
// every transition rather than sampling 1/gTrackingPeriod of them.
var casgstatusAlwaysTrack = false
1276
1277
1278
1279
1280
1281
1282
// casgstatus transitions gp from oldval to newval, spinning while the
// status is transiently held elsewhere (e.g. in a _Gscan state by the GC).
// Neither value may carry _Gscan and they must differ. It also maintains
// the sampled scheduling-latency tracking state (gp.tracking et al.).
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			// Run on the system stack so print/throw here cannot grow
			// the user stack mid-transition.
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	lockWithRankMayAcquire(nil, lockRankGscan)

	// Yield tuning for the spin loop below (ns before switching from
	// procyield to osyield).
	const yieldDelay = 5 * 1000
	var nextYield int64

	// Spin until the CAS lands. If the status is in a scan state, this
	// gives the GC time to finish and restore oldval.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			systemstack(func() {
				// A Gwaiting goroutine must not be made Grunnable behind
				// our back; that indicates a serious scheduler bug.
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			// Short spin first.
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			// Then fall back to yielding the OS thread.
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	if gp.bubble != nil {
		systemstack(func() {
			gp.bubble.changegstatus(gp, oldval, newval)
		})
	}

	if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
		// Leaving running/syscall: decide (by sampling, unless always-on)
		// whether to track this goroutine's latencies for this period.
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Tracked quantities:
	//   - time spent in _Grunnable (runnableTime),
	//   - time spent blocked on mutex-style waits (totalMutexWaitTime).
	switch oldval {
	case _Grunnable:
		// Left runnable: accumulate time spent waiting to run.
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not a lock wait; nothing to record.
			break
		}
		// Scale the sampled wait up by the sampling period to estimate
		// the total mutex wait time.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not a lock wait; nothing to stamp.
			break
		}
		// Entering a lock wait: stamp the start time.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// Entering runnable: stamp the start time.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// Started running: tracking for this period ends; record the
		// accumulated runnable time.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}
1390
1391
1392
1393
// casGToWaiting transitions gp from old to _Gwaiting, setting the wait
// reason before the status change becomes visible.
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}

// casGToWaitingForSuspendG transitions gp from old to _Gwaiting with a
// reason that must be one of the isWaitingForSuspendG reasons — those
// marking a goroutine that is waiting on another goroutine's suspension.
func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForSuspendG() {
		throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
	}
	casGToWaiting(gp, old, reason)
}
1413
1414
1415
1416
1417
// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted,
// spinning until the CAS succeeds. The caller then owns the scan bit and
// must release it through the casfrom path. Only this exact transition is
// permitted.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
	// Note: gp.bubble is not notified here, unlike other transitions —
	// presumably handled at the eventual _Gpreempted -> _Gwaiting step
	// (see casGFromPreempted); confirm at the suspend sites.
}
1432
1433
1434
1435
// casGFromPreempted attempts to move gp from _Gpreempted to _Gwaiting
// (reason preempted). Returns false if the status changed underfoot; on
// success, also notifies gp's bubble of the change. Only this exact
// transition is permitted.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	// Set the reason before the status becomes visible.
	gp.waitreason = waitReasonPreempted
	if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
		return false
	}
	if bubble := gp.bubble; bubble != nil {
		bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
	}
	return true
}
1449
1450
// stwReason is an enumeration of reasons the world is being stopped.
type stwReason uint8

// Reasons to stop the world. Values index stwReasonStrings, so new
// reasons should be appended rather than reusing slots.
const (
	stwUnknown stwReason = iota
	stwGCMarkTerm
	stwGCSweepTerm
	stwWriteHeapDump
	stwGoroutineProfile
	stwGoroutineProfileCleanup
	stwAllGoroutinesStack
	stwReadMemStats
	stwAllThreadsSyscall
	stwGOMAXPROCS
	stwStartTrace
	stwStopTrace
	stwForTestCountPagesInUse
	stwForTestReadMetricsSlow
	stwForTestReadMemStatsSlow
	stwForTestPageCachePagesLeaked
	stwForTestResetDebugLog
)
1475
// String returns the human-readable name of r (from stwReasonStrings).
func (r stwReason) String() string {
	return stwReasonStrings[r]
}
1479
1480 func (r stwReason) isGC() bool {
1481 return r == stwGCMarkTerm || r == stwGCSweepTerm
1482 }
1483
1484
1485
1486
// stwReasonStrings maps each stwReason to its display name, used by
// stwReason.String (and thus by tracing/diagnostics).
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}
1506
1507
1508
// worldStop carries context from a stop-the-world to the matching
// start-the-world.
type worldStop struct {
	reason           stwReason // why the world was stopped
	startedStopping  int64     // nanotime when the stop began
	finishedStopping int64     // nanotime when all Ps had stopped
	stoppingCPUTime  int64     // summed CPU time Ps spent after being asked to stop
}

// stopTheWorldContext hands the worldStop out of the systemstack closure
// in stopTheWorld. Effectively protected by worldsema, which the caller
// holds across the stop.
var stopTheWorldContext worldStop
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
// stopTheWorld stops all Ps from executing goroutines for the given
// reason and returns a handle that must be passed to startTheWorld to
// resume. It acquires worldsema, which startTheWorld releases, so the
// caller holds exclusive stop rights in between.
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	// Mark this M non-preemptible (and label it) while the world is down.
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		// The heavy lifting must run on the system stack.
		stopTheWorldContext = stopTheWorldWithSema(reason)
	})
	return stopTheWorldContext
}
1547
1548
1549
1550
// startTheWorld undoes stopTheWorld: it restarts the Ps on the system
// stack, then clears preemptoff and releases worldsema. w is the handle
// returned by the matching stopTheWorld.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	// Release worldsema with direct handoff (the `true` argument) so the
	// next stopper gets it fairly — world stops are heavyweight enough
	// that fairness matters here. acquirem prevents us from being
	// preempted between restarting the world and the release, which would
	// keep other stoppers (e.g. GC) waiting longer.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}
1574
1575
1576
1577
// stopTheWorldGC is stopTheWorld plus mutual exclusion with the GC: it
// acquires gcsema first, so a GC cannot start while the caller holds the
// stop. Pair with startTheWorldGC.
func stopTheWorldGC(reason stwReason) worldStop {
	semacquire(&gcsema)
	return stopTheWorld(reason)
}

// startTheWorldGC undoes stopTheWorldGC: restart the world, then allow
// the GC to run again.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	semrelease(&gcsema)
}
1590
1591
// worldsema serializes stop-the-world operations; it is held from
// stopTheWorld until the matching startTheWorld.
var worldsema uint32 = 1

// gcsema additionally excludes the garbage collector: stopTheWorldGC
// holds it across a stop so that no GC cycle can begin in between
// (a GC cycle is expected to hold it for the cycle's duration).
var gcsema uint32 = 1
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
// stopTheWorldWithSema is the core of stopTheWorld. The caller holds
// worldsema and runs this on the system stack. It asks every P to stop —
// preempting running goroutines, reclaiming Ps in syscalls, and claiming
// idle Ps directly — then waits for the stragglers to check in.
func stopTheWorldWithSema(reason stwReason) worldStop {
	// Mark the calling goroutine as waiting on other goroutines'
	// suspension for the duration; restored to _Grunning at the end.
	casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)

	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// Holding a runtime lock here could deadlock: a stopped M might be
	// blocked trying to acquire the same lock.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime() // timed after sched.lock so lock wait isn't counted
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()

	// Stop our own P immediately.
	gp.m.p.ptr().status = _Pgcstop
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--

	// Reclaim Ps whose Ms are blocked in syscalls.
	for _, pp := range allp {
		if thread, ok := setBlockOnExitSyscall(pp); ok {
			thread.gcstopP()
			thread.resume()
		}
	}

	// Claim all idle Ps.
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// Wait for the remaining Ps to stop voluntarily.
	if wait {
		for {
			// Wait up to 100us, then re-preempt in case of races.
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Sanity-check that everything stopped, and sum the CPU time each P
	// consumed between being asked to stop and the final stop.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// Another thread is crashing and freezing the world, which can
		// legitimately break the checks above. Halt this thread quietly
		// by deadlocking on a self-lock.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	// Back to running now that the stop is complete.
	casgstatus(getg().m.curg, _Gwaiting, _Grunning)

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}
1759
1760
1761
1762
1763
1764
1765
// startTheWorldWithSema undoes stopTheWorldWithSema: it injects pending
// netpoll work, resizes the P set (honoring a pending newprocs), wakes or
// creates Ms for Ps with work, and records STW timing. now may be 0, in
// which case a fresh timestamp is taken; the restart time is returned.
// Must run on the system stack; the world must be stopped.
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption while holding Ps in locals
	if netpollinited() {
		// Pick up any network readiness accumulated during the stop.
		list, delta := netpoll(0)
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		// A GOMAXPROCS change was requested while the world was stopped.
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	// Hand each P with local work to an M: wake its bound M if it has
	// one, otherwise start a new M for it.
	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start an M to run the P; don't rely on wakep below.
			newm(nil, p, -1)
		}
	}

	// Capture the restart time before the remaining cleanup.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wake an additional P in case there is excess runnable work; it will
	// simply park again if there is nothing to do.
	wakep()

	releasem(mp)

	return now
}
1834
1835
1836
1837 func usesLibcall() bool {
1838 switch GOOS {
1839 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1840 return true
1841 }
1842 return false
1843 }
1844
1845
1846
1847 func mStackIsSystemAllocated() bool {
1848 switch GOOS {
1849 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1850 return true
1851 }
1852 return false
1853 }
1854
1855
1856
// mstart is the entry point for new Ms. It has no Go body; presumably
// implemented in assembly, handing control to mstart0 below — confirm in
// the per-arch asm files.
func mstart()
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
// mstart0 is the Go half of new-M startup: it establishes g0's stack
// bounds and guards, runs mstart1, and when mstart1 returns (via the
// goexit path) tears the thread down with mexit.
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from the system stack. A caller (e.g.
		// cgo) may have left a requested size in stack.hi.
		//
		// These bounds are approximate: hi is set to the address of a
		// local, but there is data above it; the 1024 compensates for
		// that, somewhat arbitrarily.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		gp.stack.lo = gp.stack.hi - size + 1024
	}
	// Initialize the stack guard so regular Go calls work...
	gp.stackguard0 = gp.stack.lo + stackGuard
	// ...and stackguard1, which go:systemstack functions check on g0.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// On these systems the stack is always system-allocated but was
		// placed in gp.stack before mstart, so osStack wasn't set above.
		osStack = true
	}
	mexit(osStack)
}
1905
1906
1907
1908
1909
// mstart1 finishes M startup on g0: it records a scheduling frame that
// returns into mstart0, runs platform init (asminit/minit), m0-only init,
// the optional mstartfn, acquires the pending P (non-m0 only), and enters
// the scheduler. It does not return normally — schedule never returns.
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Set up g0.sched to return to just after the mstart1 call in
	// mstart0, so that paths which gogo back through g0 (e.g. goexit0 /
	// mcall) land in mstart0 and let it exit the thread. We never return
	// to mstart1 itself after calling schedule.
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = sys.GetCallerPC()
	gp.sched.sp = sys.GetCallerSP()

	asminit()
	minit()

	// m0-only initialization (signal handlers etc.), after minit has
	// prepared the thread to handle signals.
	if gp.m == &m0 {
		mstartm0()
	}

	if debug.dataindependenttiming == 1 {
		sys.EnableDIT()
	}

	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	// Every M but m0 starts with a P parked in nextp; take ownership.
	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}
1950
1951
1952
1953
1954
1955
1956
// mstartm0 performs one-time setup that must happen on m0: creating an
// initial extra M for cgo/Windows callbacks and installing signal
// handlers.
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// Windows needs one even without cgo (for non-Go DLL threads).
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}
1967
1968
1969
1970
// mPark blocks the current M on its park note until another M wakes it
// with notewakeup(&mp.park), then clears the note for reuse.
func mPark() {
	gp := getg()

	// With the runtimesecret experiment, scrub secrets from the signal
	// stack before the thread goes idle.
	if goexperiment.RuntimeSecret {
		eraseSecretsSignalStk()
	}
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
// mexit tears down the calling M and exits its OS thread (unless
// osStack is true, in which case the thread returns so the system can
// reclaim its stack). m0 is special-cased: it is parked forever instead
// of exiting.
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// m0 cannot exit the process's main thread; hand off its P,
		// account it as freed, and park it forever.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack now that signals are blocked on this thread.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		if valgrindenabled {
			valgrindDeregisterStack(mp.gsignal.valgrindStackID)
			mp.gsignal.valgrindStackID = 0
		}
		// Drop the reference so nothing can use the freed stack.
		mp.gsignal = nil
	}

	// Release per-M vgetrandom state, if any.
	vgetrandomDestroy(mp)

	// Break the M's self-reference.
	mp.self.clear()

	// Unlink mp from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Queue mp on sched.freem so a later allocm can free its g0 stack
	// (freeMWait: not yet safe to free — this thread is still using it).
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	// Fold per-M counters into the global totals before the M goes away.
	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Give up the P; another M can run its work.
	handoffp(releasep())

	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// Account for an undelivered preemption signal on this thread.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Platform-specific M destruction.
	mdestroy(mp)

	if osStack {
		// The g0 stack belongs to the OS; mark mp as only referenced,
		// not stack-owning, and return so the thread can exit normally.
		mp.freeWait.Store(freeMRef)
		return
	}

	// Exit the thread; exitThread signals through freeWait when the
	// stack is no longer in use so it can be freed.
	exitThread(&mp.freeWait)
}
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
// forEachP runs fn(p) on every P at a GC safe point, blocking until all
// Ps have run it. The caller's goroutine is parked in a waiting state
// (with the given reason) for the duration so it can be suspended.
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg

		// Transition the user goroutine to waiting so forEachPInternal
		// (which may block) does not hold it in _Grunning.
		casGToWaitingForSuspendG(gp, _Grunning, reason)
		forEachPInternal(fn)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
// forEachPInternal implements forEachP: it arranges for fn to run on
// every P (running it directly on idle/syscall Ps, and asking running Ps
// to run it at their next safe point), then waits for completion.
// Must be called on the system stack with a P held.
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	// Every P except our own must run fn.
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all other Ps to run fn at their next safe point.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Run fn directly on idle Ps; the CAS races with Ps leaving the idle
	// list, so only the winner runs fn.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn on our own P.
	fn(pp)

	// Run fn on behalf of Ps stuck in syscalls: take the P from the
	// syscalling thread and hand it off (handoffp runs the safe-point fn).
	for _, p2 := range allp {
		if atomic.Load(&p2.runSafePointFn) != 1 {
			continue
		}
		if thread, ok := setBlockOnExitSyscall(p2); ok {
			thread.takeP()
			thread.resume()
			handoffp(p2)
		}
	}

	// Wait for the remaining Ps, re-preempting periodically in case a
	// preemption request was missed.
	if wait {
		for {
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
// runSafePointFn runs the pending forEachP safe-point function on the
// current M's P, if one was requested, and signals forEachPInternal when
// the last P finishes.
func runSafePointFn() {
	p := getg().m.p.ptr()

	// CAS resolves the race with forEachPInternal running fn on an idle P
	// or a syscall P; only one side runs fn.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}
2259
2260
2261
2262
// cgoThreadStart points to the C thread-start function when running with
// cgo. NOTE(review): it is not assigned in this chunk — presumably set by
// cgo setup code elsewhere; confirm before relying on it.
var cgoThreadStart unsafe.Pointer

// cgothreadstart is the argument block passed to the C thread-start
// function (see newm1): the new thread's g0, its TLS slot, and the entry
// function (mstart).
type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
// allocm allocates a new M (not associated with any OS thread yet) with
// the given start function and ID (-1 to auto-assign). pp is temporarily
// acquired if the caller has no P, since allocation needs one. It also
// opportunistically frees g0 stacks of previously exited Ms.
func allocm(pp *p, fn func(), id int64) *m {
	// Hold allocmLock for read so that mass M operations can exclude
	// allocation by taking it for write.
	allocmLock.rlock()

	// Disable preemption while we may be running with a borrowed P.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		acquirep(pp) // temporary borrow for mallocs in this function
	}

	// Free g0 stacks of dead Ms queued by mexit, keeping the ones that
	// are still exiting (freeMWait) on the list.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				// Thread still exiting; keep it queued.
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// Flush the dead M's trace state before dropping it.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			// freeMStack means we own the g0 stack and must free it.
			if wait == freeMStack {
				systemstack(func() {
					stackfree(freem.g0.stack)
					if valgrindenabled {
						valgrindDeregisterStack(freem.g0.valgrindStackID)
						freem.g0.valgrindStackID = 0
					}
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := &new(mPadded).m
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// On cgo or system-stack platforms the g0 stack comes from the OS
	// (malg(-1) allocates no Go stack); otherwise allocate one.
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	// Release the borrowed P, if we took it above.
	if pp == gp.m.p.ptr() {
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
// needm obtains an extra M for a callback arriving on a thread not
// created by Go (cgo callback or, on Windows, a foreign thread), sets it
// up as the current thread's M, and transitions its g into _Gsyscall.
// signal reports whether the callback is running in a signal handler.
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// No extra M exists yet (can happen for callbacks before any cgo
		// call, e.g. from a global C constructor). We cannot throw here —
		// there is no Go state — so write directly and exit.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save and block signals before this thread owns Go state; restore
	// mask is kept on the M so dropm can put it back.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// Take an M off the extra list (blocks until one is available).
	mp, last := getExtraM()

	// If we took the last extra M, remember to create a replacement
	// (needextram) once this M is usable.
	mp.needextram = last

	// Store the original signal mask for use by dropm/minit.
	mp.sigmask = sigmask

	// Install TLS pointing at this M on platforms that need it.
	osSetupTLS(mp)

	// Make this thread run on the M's g0 and establish g0's stack bounds
	// from the current (C) stack.
	setg(mp.g0)
	sp := sys.GetCallerSP()
	callbackUpdateSystemStack(mp, sp, signal)

	// This M now services a Go callback, not pure C code.
	mp.isExtraInC = false

	asminit()
	minit()

	// Tracing is skipped in signal context (not async-signal-safe).
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// Bring the parked extra g out of _Gdeadextra into _Gsyscall.
	casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
	sched.ngsys.Add(-1)

	// Account this M as in a syscall without a P.
	addGSyscallNoP(mp)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	mp.isExtraInSig = signal
}
2488
2489
2490
2491
// needAndBindM obtains an extra M via needm and, if the C pthread key
// for M binding has been created, binds the M to the current C thread so
// later callbacks on this thread reuse it.
func needAndBindM() {
	needm(false)

	if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
		cgoBindM()
	}
}
2499
2500
2501
2502
2503 func newextram() {
2504 c := extraMWaiters.Swap(0)
2505 if c > 0 {
2506 for i := uint32(0); i < c; i++ {
2507 oneNewExtraM()
2508 }
2509 } else if extraMLength.Load() == 0 {
2510
2511 oneNewExtraM()
2512 }
2513 }
2514
2515
// oneNewExtraM allocates one M and goroutine pair and pushes them onto
// the extra-M list for use by needm. The goroutine is parked in
// _Gdeadextra with a fabricated "goexit" frame so callbacks appear to
// be called from goexit.
func oneNewExtraM() {
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	// Fake a sched context as if the goroutine is paused just inside
	// goexit, with room for a small frame.
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * goarch.PtrSize
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp

	// _Gdeadextra marks it as a parked extra-M goroutine (see needm).
	casgstatus(gp, _Gidle, _Gdeadextra)
	gp.m = mp
	mp.curg = gp
	mp.isextra = true

	// Until a callback arrives, this M notionally runs C code.
	mp.isExtraInC = true
	// Lock the goroutine and M to each other.
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	// Make it visible to the garbage collector and debuggers.
	allgadd(gp)

	// Count it as a system goroutine.
	sched.ngsys.Add(1)

	addExtraM(mp)
}
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
// dropm is the inverse of needm: it detaches the current (extra) M from
// the thread, returns its goroutine to _Gdeadextra, and puts the M back
// on the extra list. After dropm, the thread runs pure C code again.
func dropm() {
	mp := getg().m

	// Tracing is skipped when the M was taken in signal context.
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// Park the goroutine back into the extra-M dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)
	decGSyscallNoP(mp)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// Undo the syscalltick increment implied by the needm/dropm cycle so
	// the pair is invisible to the scheduler's syscall accounting.
	mp.syscalltick--

	// Reset per-g trace state; the next needm starts fresh.
	mp.curg.trace.reset()

	// Flush this M's trace state: the M is "destroyed" from the tracer's
	// point of view while parked on the extra list.
	if traceEnabled() || traceShuttingDown() {
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit tears down signal handling, then
	// restore the thread's original mask (saved by needm) at the end.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()

	setg(nil)

	// Invalidate g0's stack bounds: they described the C thread's stack,
	// which is meaningless once the M is parked.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0
	mp.g0StackAccurate = false

	putExtraM(mp)

	msigrestore(sigmask)
}
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702 func cgoBindM() {
2703 if GOOS == "windows" || GOOS == "plan9" {
2704 fatal("bindm in unexpected GOOS")
2705 }
2706 g := getg()
2707 if g.m.g0 != g {
2708 fatal("the current g is not g0")
2709 }
2710 if _cgo_bindm != nil {
2711 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2712 }
2713 }
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
// getm returns the current M as an opaque uintptr (for callers that must
// not hold a *m across scheduling points).
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}
2729
var (
	// extraM is the head of the extra-M list, encoded as a uintptr.
	// The value 1 means the list is locked (see lockextra).
	extraM atomic.Uintptr

	// extraMLength is the number of Ms currently on the extra list.
	extraMLength atomic.Uint32

	// extraMWaiters counts callers blocked in lockextra waiting for an
	// extra M to appear; newextram creates one M per waiter.
	extraMWaiters atomic.Uint32

	// extraMInUse counts extra Ms taken off the list and currently in use.
	extraMInUse atomic.Uint32
)
2746
2747
2748
2749
2750
2751
2752
2753
// lockextra locks the extra-M list and returns its head. If nilokay is
// false it spins until the list is non-empty, registering itself in
// extraMWaiters so newextram knows to create replacements.
// The list is "locked" by CASing the sentinel value 1 into extraM.
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			// Someone else holds the list lock; yield and retry.
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Count ourselves as a waiter exactly once, so that
				// newextram creates one new M per waiter.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}
2782
2783
// unlockextra unlocks the extra-M list, installing mp as the new head
// and adjusting the list length by delta. The Store is what releases the
// lock taken by lockextra, so the length update must come first.
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}
2788
2789
2790
2791
2792
2793
2794
2795
// getExtraM pops an M off the extra list (blocking until one exists) and
// reports whether it was the last one, so the caller can arrange a
// replacement.
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	// Re-head the list at mp's successor; this also releases the lock.
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}
2802
2803
2804
2805
2806
// putExtraM returns a previously taken extra M to the list, undoing the
// in-use accounting done by getExtraM.
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}
2811
2812
2813
2814
// addExtraM pushes mp onto the front of the extra-M list.
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
2820
var (
	// allocmLock is held for read by allocm; taking it for write excludes
	// all M allocation.
	allocmLock rwmutex

	// execLock is held for read while starting OS threads (newm1) so that
	// exec-style operations can take it for write and exclude thread
	// creation.
	execLock rwmutex
)

// Messages written directly (no allocation) when thread creation fails.
const (
	failthreadcreate  = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)
2839
2840
2841
2842
// newmHandoff hands M-creation requests to the template thread, for
// callers that cannot safely create OS threads themselves (see newm).
var newmHandoff struct {
	lock mutex

	// newm is a singly-linked list (via schedlink) of Ms awaiting thread
	// creation by the template thread.
	newm muintptr

	// waiting is true when the template thread is asleep on wake.
	waiting bool
	wake    note

	// haveTemplateThread is set (CAS, see startTemplateThread) once the
	// template thread has been started.
	haveTemplateThread uint32
}
2860
2861
2862
2863
2864
2865
2866
2867
// newm creates a new M (and OS thread) that starts by running fn, with
// pp as its initial P (may be nil) and the given M id (-1 to assign).
// If the calling thread is locked or in cgo, thread creation is handed
// off to the template thread, whose state is known-good.
func newm(fn func(), pp *p, id int64) {
	// Disable preemption: once mp is allocated but not yet started it is
	// invisible to the scheduler's thread accounting (e.g. checkdead).
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// This thread's state (signal mask, locked-thread attributes) may
		// be unsuitable to inherit; queue the request for the template
		// thread instead. Plan 9 is excluded.
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}
2916
// newm1 creates the OS thread for mp, either through the C thread-start
// function (cgo) or directly via newosproc. execLock excludes concurrent
// exec while the thread starts.
func newm1(mp *m) {
	if iscgo && _cgo_thread_start != nil {
		var ts cgothreadstart
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock() // prevent process clone while starting the thread
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock()
	newosproc(mp)
	execLock.runlock()
}
2938
2939
2940
2941
2942
// startTemplateThread starts the template thread if it is not already
// running (no threads on wasm). The CAS on haveTemplateThread makes this
// idempotent.
func startTemplateThread() {
	if GOARCH == "wasm" {
		return
	}

	// Disable preemption so the started M is guaranteed to be counted by
	// the scheduler before we could be descheduled.
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
// templateThread runs forever on an M with known-good state, creating OS
// threads on behalf of callers whose own thread state is unsuitable
// (see newm). It sleeps on newmHandoff.wake between batches.
func templateThread() {
	lock(&sched.lock)
	sched.nmsys++ // count as a system M for deadlock detection
	checkdead()
	unlock(&sched.lock)

	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			// Detach the pending list and process it unlocked, since
			// newm1 may block on thread creation.
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}
2997
2998
2999
// stopm puts the current M on the idle M list and parks it until another
// M wakes it with a P to run (delivered via nextp).
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	// Whoever woke us handed us a P in nextp.
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3020
// mspinning is used as an M start function (see startm) to mark a newly
// started M as spinning before it enters the scheduler.
func mspinning() {
	getg().m.spinning = true
}
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
// startm schedules some M to run pp (allocating a new M and OS thread if
// none is idle). If pp is nil an idle P is taken; if none is available
// it returns without doing anything. spinning marks the woken/created M
// as spinning; lockheld indicates the caller already holds sched.lock.
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption: with a P handed to an M but that M not yet
	// running, intermediate states must not be observed by a stop-the-
	// world (the P would be lost).
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// A spinning M must have a specific P to spin for.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No idle M: create one. Reserve its ID while holding the lock,
		// but drop the lock around newm (which may block on allocation
		// or thread creation).
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The new M starts out spinning (see mspinning).
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock) // restore the caller's lock state
		}

		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		throw("startm: p has runnable gs")
	}
	// Hand pp to the parked M and wake it.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)

	releasem(mp)
}
3134
3135
3136
3137
3138
// handoffp hands off ownership of pp from a thread that is giving it up
// (syscall entry, M exit). It starts an M for pp if there is work for it
// to do; otherwise it parks pp on the idle list, honoring pending
// GC stops and safe-point functions along the way.
func handoffp(pp *p) {
	// Local or global runnable work: run it.
	if !runqempty(pp) || !sched.runq.empty() {
		startm(pp, false, false)
		return
	}
	// A trace reader is waiting for a P.
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// GC mark work available.
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
		startm(pp, false, false)
		return
	}
	// No local work, but no spinning Ms and no idle Ps either: conserve
	// one spinning M to guard against races with new work arriving.
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		// A stop-the-world is in progress: stop pp here.
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	// Run a pending forEachP safe-point function on pp's behalf.
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if !sched.runq.empty() {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// All other Ps are idle and someone must poll the network: keep this
	// one running for that.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// Park pp; capture its next timer so we can make sure the netpoller
	// wakes in time for it.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
// wakep starts a spinning M to run an idle P, if there is an idle P and
// no M is already spinning. Called when new work may have appeared.
func wakep() {
	// Only proceed if we can transition nmspinning 0 -> 1; an existing
	// spinning M will find the work itself.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption while holding the spinning count but no P yet;
	// a stop-the-world in between would deadlock on the accounting.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		// No idle P after all; undo the spinning claim.
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}

	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
3255
3256
3257
// stoplockedm parks an M that is locked to a goroutine until that
// goroutine becomes runnable again (startlockedm wakes it and hands it
// a P via nextp).
func stoplockedm() {
	gp := getg()

	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Give up our P so other goroutines can run on it.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)

	mPark()
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3281
3282
3283
3284
3285
// startlockedm hands the current P to the M that gp is locked to, wakes
// that M, and stops the current M (which cannot run gp itself).
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// The locked M is leaving the idle-locked pool.
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	notewakeup(&mp.park)
	stopm()
}
3301
3302
3303
// gcstopm stops the current M for a stop-the-world: it releases its P
// into _Pgcstop, signals the stopper when the last P stops, and parks.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		// We are no longer looking for work; drop the spinning claim.
		gp.m.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
// execute runs gp on the current M, never returning (gogo jumps into
// gp's saved context). inheritTime true means gp keeps the remainder of
// the current time slice (schedtick is not advanced).
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Record gp in the in-progress goroutine profile before it runs.
		tryRecordGoroutineProfile(gp, nil, osyield)
	}

	// Bind gp to this M and transition it to running.
	mp.curg = gp
	gp.m = mp
	gp.syncSafePoint = false
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		mp.p.ptr().schedtick++
	}

	// Apply the goroutine's data-independent-timing preference to the
	// CPU state, unless DIT is forced on globally.
	if sys.DITSupported && debug.dataindependenttiming != 1 {
		if gp.ditWanted && !mp.ditEnabled {
			sys.EnableDIT()
			mp.ditEnabled = true
		} else if !gp.ditWanted && mp.ditEnabled {
			sys.DisableDIT()
			mp.ditEnabled = false
		}
	}

	// Sync this thread's CPU profiling rate with the global setting.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	gogo(&gp.sched)
}
3392
3393
3394
3395
3396
// findRunnable finds a goroutine to run, blocking (stopm) until one is
// available. It checks, in priority order: GC stop / safe-point fns,
// timers, the trace reader, GC workers, the global queue (occasionally,
// for fairness), finalizers/cleanups, the local queue, the global queue,
// the netpoller, work stealing, and idle-time GC marking — and only then
// releases its P and parks. tryWakeP tells the caller the returned g is
// special (trace reader / GC worker) and a P wake may be warranted.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

top:
	// Drop any allp snapshot from a previous iteration; it can hold a
	// stale view of allp.
	mp.clearAllpSnapshot()

	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// Run expired timers on this P; now/pollUntil feed the poll decision
	// at the bottom of this function.
	now, pollUntil, _ := pp.timers.check(0, nil)

	// The trace reader runs at highest priority.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// A GC mark worker, if the GC wants one on this P.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Occasionally poll the global queue first so it cannot starve
	// behind a busy local queue.
	if pp.schedtick%61 == 0 && !sched.runq.empty() {
		lock(&sched.lock)
		gp := globrunqget()
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake the finalizer goroutine if it is waiting and has work.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}

	// Wake cleanup goroutines if needed.
	if gcCleanups.needsWake() {
		gcCleanups.wake()
	}

	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// Local run queue.
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// Global run queue: take a batch into the local queue.
	if !sched.runq.empty() {
		lock(&sched.lock)
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp != nil {
			if runqputbatch(pp, &q); !q.empty() {
				throw("Couldn't put Gs into empty local runq")
			}
			return gp, false, false
		}
	}

	// Opportunistic non-blocking netpoll, limited to one poller at a
	// time (pollingNet) and skipped if a blocking poll is in flight
	// (lastpoll == 0).
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
		list, delta := netpoll(0)
		sched.pollingNet.Store(0)
		if !list.empty() { // non-blocking
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Steal work from other Ps. Limit spinning Ms to half of busy Ps to
	// bound CPU burn when parallelism is low.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			return gp, inheritTime, false
		}
		if newWork {
			// Stealing observed new work (e.g. ran a timer); rescan.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer on some other P to wait for.
			pollUntil = w
		}
	}

	// Idle-priority GC mark work.
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// Platform hook (e.g. wasm) that may produce work before idling.
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// About to release the P. Snapshot allp and the masks first: after
	// releasep these structures may be mutated under us, and the
	// snapshots let us re-check for missed work without a P.
	allpSnapshot := mp.snapshotAllp()
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// Re-check all sources under sched.lock, then release the P.
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if !sched.runq.empty() {
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp == nil {
			throw("global runq empty with non-zero runqsize")
		}
		if runqputbatch(pp, &q); !q.empty() {
			throw("Couldn't put Gs into empty local runq")
		}
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// A handoff wants a spinning M; become it and rescan.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findRunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)

	// Delicate dance: if we were spinning we must drop the spinning
	// count BEFORE the final work re-checks below, then re-check every
	// source using the snapshots, to close the race with submitters who
	// publish work and then observe a spinning M.
	wasSpinning := mp.spinning
	if mp.spinning {
		mp.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("findRunnable: negative nmspinning")
		}

		// Re-check the global queue with an idle P.
		lock(&sched.lock)
		if !sched.runq.empty() {
			pp, _ := pidlegetSpinning(0)
			if pp != nil {
				gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
				unlock(&sched.lock)
				if gp == nil {
					throw("global runq empty with non-zero runqsize")
				}
				if runqputbatch(pp, &q); !q.empty() {
					throw("Couldn't put Gs into empty local runq")
				}
				acquirep(pp)
				mp.becomeSpinning()
				return gp, false, false
			}
		}
		unlock(&sched.lock)

		// Re-check other Ps' local queues using the snapshots.
		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()
			goto top
		}

		// Re-check idle GC mark work.
		pp, gp := checkIdleGCNoP()
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()

			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}

		// Re-check timers so our upcoming netpoll sleep wakes in time.
		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
	}

	// Block on the netpoller (sole poller: lastpoll.Swap(0) claims it),
	// either indefinitely or until the next timer.
	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
		sched.pollUntil.Store(pollUntil)
		if mp.p != 0 {
			throw("findRunnable: netpoll with p")
		}
		if mp.spinning {
			throw("findRunnable: netpoll with spinning")
		}
		delay := int64(-1)
		if pollUntil != 0 {
			if now == 0 {
				now = nanotime()
			}
			delay = pollUntil - now
			if delay < 0 {
				delay = 0
			}
		}
		if faketime != 0 {
			// Under faketime, only poll without blocking.
			delay = 0
		}
		list, delta := netpoll(delay) // blocks until a new work arrives
		// Refresh now: netpoll may have blocked arbitrarily long.
		now = nanotime()
		sched.pollUntil.Store(0)
		sched.lastpoll.Store(now)
		if faketime != 0 && list.empty() {
			// No real work under faketime; let faketime advance the clock.
			stopm()
			goto top
		}
		lock(&sched.lock)
		pp, _ := pidleget(now)
		unlock(&sched.lock)
		if pp == nil {
			injectglist(&list)
			netpollAdjustWaiters(delta)
		} else {
			acquirep(pp)
			if !list.empty() {
				gp := list.pop()
				injectglist(&list)
				netpollAdjustWaiters(delta)
				trace := traceAcquire()
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.ok() {
					trace.GoUnpark(gp, 0)
					traceRelease(trace)
				}
				return gp, false, false
			}
			if wasSpinning {
				mp.becomeSpinning()
			}
			goto top
		}
	} else if pollUntil != 0 && netpollinited() {
		// Someone else is polling; make sure they wake for our timer.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
			netpollBreak()
		}
	}
	stopm()
	goto top
}
3807
3808
3809
3810
3811
// pollWork reports whether there is runnable work anywhere obvious: the
// global queue, the current P's local queue, or ready network pollers.
// As a side effect it injects any goroutines netpoll returns.
func pollWork() bool {
	if !sched.runq.empty() {
		return true
	}
	p := getg().m.p.ptr()
	if !runqempty(p) {
		return true
	}
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() {
			injectglist(&list)
			netpollAdjustWaiters(delta)
			return true
		}
	}
	return false
}
3829
3830
3831
3832
3833
3834
3835
// stealWork tries to steal a runnable goroutine (or, on the final pass,
// a timer) from another P. It returns the stolen g if any, the updated
// time, the earliest known timer deadline, and newWork=true if new work
// may have appeared (so the caller should rescan).
func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
	pp := getg().m.p.ptr()

	ranTimer := false

	const stealTries = 4
	for i := 0; i < stealTries; i++ {
		// Only the last round is allowed to steal runnext and run timers,
		// which are more disruptive to the victim.
		stealTimersOrRunNextG := i == stealTries-1

		// Random start position so Ps don't all gang up on the same victim.
		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
			if sched.gcwaiting.Load() {
				// GC stop pending; report as new work so the caller
				// returns to the top of findRunnable.
				return nil, false, now, pollUntil, true
			}
			p2 := allp[enum.position()]
			if pp == p2 {
				continue
			}

			// Run p2's expired timers (final pass only). Consult
			// timerpMask first to skip Ps with no timers cheaply.
			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
				tnow, w, ran := p2.timers.check(now, nil)
				now = tnow
				if w != 0 && (pollUntil == 0 || w < pollUntil) {
					pollUntil = w
				}
				if ran {
					// A timer may have readied a goroutine onto our own
					// local queue; check before continuing to steal.
					if gp, inheritTime := runqget(pp); gp != nil {
						return gp, inheritTime, now, pollUntil, ranTimer
					}
					ranTimer = true
				}
			}

			// Don't bother stealing from idle Ps.
			if !idlepMask.read(enum.position()) {
				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
					return gp, false, now, pollUntil, ranTimer
				}
			}
		}
	}

	// Nothing found; ranTimer indicates timers ran, which may have
	// created work elsewhere.
	return nil, false, now, pollUntil, ranTimer
}
3904
3905
3906
3907
3908
3909
// checkRunqsNoP re-checks, without holding a P, whether any non-idle P
// in the snapshot has runnable work; if so it returns an idle P the
// caller should acquire to go steal that work. Returns nil otherwise.
func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
	for id, p2 := range allpSnapshot {
		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
			lock(&sched.lock)
			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				// No idle P available to run the work we found.
				unlock(&sched.lock)
				return nil
			}
			unlock(&sched.lock)
			return pp
		}
	}

	return nil
}
3928
3929
3930
3931
3932 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3933 for id, p2 := range allpSnapshot {
3934 if timerpMaskSnapshot.read(uint32(id)) {
3935 w := p2.timers.wakeTime()
3936 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3937 pollUntil = w
3938 }
3939 }
3940 }
3941
3942 return pollUntil
3943 }
3944
3945
3946
3947
3948
// checkIdleGCNoP re-checks, without holding a P, whether idle-priority
// GC mark work is available; on success it returns an idle P (already
// registered as an idle mark worker) and the worker g to run.
func checkIdleGCNoP() (*p, *g) {
	// Cheap unsynchronized pre-checks; re-verified under the lock below.
	if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
		return nil, nil
	}
	if !gcShouldScheduleWorker(nil) {
		return nil, nil
	}

	// Grab an idle P first, then commit to the idle-worker slot, so we
	// never hold a worker slot we cannot staff.
	lock(&sched.lock)
	pp, now := pidlegetSpinning(0)
	if pp == nil {
		unlock(&sched.lock)
		return nil, nil
	}

	// Re-check under sched.lock; GC may have turned off in the interim.
	if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
		pidleput(pp, now)
		unlock(&sched.lock)
		return nil, nil
	}

	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// No parked worker goroutine; undo everything.
		pidleput(pp, now)
		unlock(&sched.lock)
		gcController.removeIdleMarkWorker()
		return nil, nil
	}

	unlock(&sched.lock)

	return pp, node.gp.ptr()
}
4006
4007
4008
4009
// wakeNetPoller ensures a timer firing at "when" will be noticed promptly.
// If a thread is blocked in the network poller (sched.lastpoll == 0) with a
// later (or no) deadline, it interrupts the poll; otherwise it wakes an M so
// some thread will be around to service timers.
func wakeNetPoller(when int64) {
	if sched.lastpoll.Load() == 0 {
		// A thread is currently blocked in netpoll. Only break it out if
		// its recorded sleep deadline is later than the new timer (or it
		// has no deadline at all).
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > when {
			netpollBreak()
		}
	} else {
		// No thread is in a blocking poll; start/wake an M so the timer
		// gets serviced. Skipped on plan9 — see the GOOS check below;
		// presumably related to how plan9 handles polling (NOTE(review):
		// original rationale lived in a stripped comment; confirm upstream).
		if GOOS != "plan9" {
			wakep()
		}
	}
}
4028
4029 func resetspinning() {
4030 gp := getg()
4031 if !gp.m.spinning {
4032 throw("resetspinning: not a spinning m")
4033 }
4034 gp.m.spinning = false
4035 nmspinning := sched.nmspinning.Add(-1)
4036 if nmspinning < 0 {
4037 throw("findRunnable: negative nmspinning")
4038 }
4039
4040
4041
4042 wakep()
4043 }
4044
4045
4046
4047
4048
4049
4050
4051
4052
// injectglist marks every goroutine on glist runnable and queues them for
// execution, preferring the current P's local run queue and spilling the
// rest to the global queue. It starts idle Ms/Ps as needed so the injected
// work actually runs. glist is emptied.
func injectglist(glist *gList) {
	if glist.empty() {
		return
	}

	// Mark all the goroutines as runnable before we put them on the run
	// queues, emitting unpark events if tracing is on.
	var tail *g
	trace := traceAcquire()
	for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		tail = gp
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// Turn the gList into a gQueue and detach it from glist.
	q := gQueue{glist.head, tail.guintptr(), glist.size}
	*glist = gList{}

	// startIdle starts up to n Ms/Ps to run the queued work. It stops early
	// if there are no more idle Ps.
	startIdle := func(n int32) {
		for ; n > 0; n-- {
			mp := acquirem()
			lock(&sched.lock)

			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				unlock(&sched.lock)
				releasem(mp)
				break
			}

			startm(pp, false, true)
			unlock(&sched.lock)
			releasem(mp)
		}
	}

	pp := getg().m.p.ptr()
	if pp == nil {
		// No P: everything goes to the global queue.
		n := q.size
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
		startIdle(n)
		return
	}

	// Hand one goroutine per idle P to the global queue so the idle Ps we
	// wake below have something to pick up immediately.
	var globq gQueue
	npidle := sched.npidle.Load()
	for ; npidle > 0 && !q.empty(); npidle-- {
		g := q.pop()
		globq.pushBack(g)
	}
	if !globq.empty() {
		n := globq.size
		lock(&sched.lock)
		globrunqputbatch(&globq)
		unlock(&sched.lock)
		startIdle(n)
	}

	// Put the remainder on our local run queue; anything that doesn't fit
	// spills to the global queue.
	if runqputbatch(pp, &q); !q.empty() {
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
	}

	// Wake another P in case there is work it could take.
	wakep()
}
4140
4141
4142
// schedule is one round of the scheduler: find a runnable goroutine and
// execute it on the current M. It never returns (execute does not return).
func schedule() {
	mp := getg().m

	if mp.locks != 0 {
		throw("schedule: holding locks")
	}

	// If this M is dedicated to a locked goroutine, park until that
	// goroutine is runnable again and run only it.
	if mp.lockedg != 0 {
		stoplockedm()
		execute(mp.lockedg.ptr(), false)
	}

	// We should not schedule away from a g that is executing a cgo call,
	// since the cgo call is using the m's g0 stack.
	if mp.incgo {
		throw("schedule: in cgo")
	}

top:
	pp := mp.p.ptr()
	pp.preempt = false

	// Sanity check: a spinning M should have no local work.
	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
		throw("schedule: spinning with local work")
	}

	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available

	// findRunnable may have switched which P this M holds; reload it.
	pp = mp.p.ptr()

	// We're done looking for work, so the allp snapshot (if any) taken
	// during the search is no longer needed.
	mp.clearAllpSnapshot()

	// Release any GC mark worker reservation this P held but didn't use.
	gcController.releaseNextGCMarkWorker(pp)

	if debug.dontfreezetheworld > 0 && freezing.Load() {
		// The world is frozen for crash dumping but this M raced past the
		// freeze. Deadlock on purpose (double-lock) so the traceback stays
		// consistent instead of running a new goroutine mid-crash.
		lock(&deadlock)
		lock(&deadlock)
	}

	// We found work, so this M is no longer spinning; possibly wake a
	// replacement spinner.
	if mp.spinning {
		resetspinning()
	}

	if sched.disable.user && !schedEnabled(gp) {
		// User-goroutine scheduling is disabled; stash gp and look again.
		// Re-check under sched.lock to avoid racing with re-enablement.
		lock(&sched.lock)
		if schedEnabled(gp) {
			unlock(&sched.lock)
		} else {
			sched.disable.runnable.pushBack(gp)
			unlock(&sched.lock)
			goto top
		}
	}

	// If gp is special (e.g. a trace reader or GC worker per findRunnable's
	// contract), wake a P so user goroutines still get run.
	if tryWakeP {
		wakep()
	}
	if gp.lockedm != 0 {
		// gp is locked to some other M: hand it off and find another g for
		// this M to run.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}
4240
4241
4242
4243
4244
4245
4246
4247
// dropg removes the association between the current M and its curg,
// clearing both directions of the m<->g link. Uses the no-write-barrier
// setters since this runs in contexts where write barriers are not allowed.
func dropg() {
	gp := getg()

	setMNoWB(&gp.m.curg.m, nil)
	setGNoWB(&gp.m.curg, nil)
}
4254
// parkunlock_c is a gopark unlock callback that releases the given runtime
// mutex. It always returns true, i.e. the park always proceeds.
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}
4259
4260
// park_m implements gopark on the g0 stack: it transitions gp from
// _Grunning to _Gwaiting, drops it from this M, runs the deferred unlock
// callback (resuming gp immediately if the callback vetoes the park), and
// then schedules something else.
func park_m(gp *g) {
	mp := getg().m

	trace := traceAcquire()

	// If gp belongs to a synctest bubble, count it as active for the
	// duration of the park bookkeeping so the bubble doesn't advance
	// while gp's status is in flux.
	bubble := gp.bubble
	if bubble != nil {
		bubble.incActive()
	}

	if trace.ok() {
		// Emit the park event before the status change so trace consumers
		// see a consistent ordering.
		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
	}

	casgstatus(gp, _Grunning, _Gwaiting)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()

	if fn := mp.waitunlockf; fn != nil {
		ok := fn(gp, mp.waitlock)
		mp.waitunlockf = nil
		mp.waitlock = nil
		if !ok {
			// The callback refused the park: make gp runnable again and
			// run it right here, preserving its time slice.
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if bubble != nil {
				bubble.decActive()
			}
			if trace.ok() {
				trace.GoUnpark(gp, 2)
				traceRelease(trace)
			}
			execute(gp, true) // Schedule it back, never returns.
		}
	}

	if bubble != nil {
		bubble.decActive()
	}

	schedule()
}
4314
// goschedImpl is the g0-side implementation of Gosched and of preemption
// yields: it moves gp from _Grunning to _Grunnable, requeues it, and calls
// schedule. preempted selects the trace event and the requeue target.
func goschedImpl(gp *g, preempted bool) {
	pp := gp.m.p.ptr()
	trace := traceAcquire()
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	if trace.ok() {
		// Emit the event before the status change for consistent ordering.
		if preempted {
			trace.GoPreempt()
		} else {
			trace.GoSched()
		}
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()
	if preempted && sched.gcwaiting.Load() {
		// A stop-the-world is pending: keep gp local (at the head) so it
		// resumes promptly on this P after the STW.
		runqput(pp, gp, true)
	} else {
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)
	}

	if mainStarted {
		wakep()
	}

	schedule()
}
4355
4356
// gosched_m is the mcall target for Gosched: a voluntary yield.
func gosched_m(gp *g) {
	goschedImpl(gp, false)
}
4360
4361
// goschedguarded_m is like gosched_m but refuses to yield when the M is in
// a non-preemptible state, simply resuming gp instead.
func goschedguarded_m(gp *g) {
	if !canPreemptM(gp.m) {
		gogo(&gp.sched) // never return
	}
	goschedImpl(gp, false)
}
4368
// gopreempt_m is the mcall target for a synchronous preemption request.
func gopreempt_m(gp *g) {
	goschedImpl(gp, true)
}
4372
4373
4374
4375
// preemptPark parks gp in _Gpreempted and enters the scheduler. It is used
// for asynchronous (signal-based) preemption, where gp was stopped at an
// async safe point rather than yielding voluntarily.
func preemptPark(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}

	if gp.asyncSafePoint {
		// Double-check that async preemption does not leave a g stopped in
		// a function that writes SP, which the scanner cannot handle.
		f := findfunc(gp.sched.pc)
		if !f.valid() {
			throw("preempt at unknown pc")
		}
		if f.flag&abi.FuncFlagSPWrite != 0 {
			println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
			throw("preempt SPWRITE")
		}
	}

	// Transition to _Gpreempted via a scan-locked intermediate state so a
	// concurrent GC cannot observe gp half-transitioned. The scan bit is
	// only dropped below, after the trace event decision point.
	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)

	// Emit the park event (if tracing) before releasing the scan bit, so
	// that whoever resumes gp cannot race ahead of our GoPark.
	trace := traceAcquire()
	if trace.ok() {
		trace.GoPark(traceBlockPreempted, 0)
	}

	// Detach gp from this M before making it visible as _Gpreempted;
	// once the scan bit drops, another M may immediately take gp.
	dropg()

	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
	if trace.ok() {
		traceRelease(trace)
	}

	schedule()
}
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
// goyield is like Gosched, but the yielding goroutine is put back on the
// current P's local run queue (see goyield_m) instead of the global queue.
func goyield() {
	checkTimeouts()
	mcall(goyield_m)
}
4463
// goyield_m is the g0-side implementation of goyield: it makes gp runnable,
// requeues it at the tail of the local run queue, and reschedules.
func goyield_m(gp *g) {
	trace := traceAcquire()
	pp := gp.m.p.ptr()
	if trace.ok() {
		// Event first, then status change, for consistent trace ordering.
		trace.GoPreempt()
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}
	dropg()
	runqput(pp, gp, false)
	schedule()
}
4481
4482
// goexit1 finishes the execution of the current goroutine: it notifies the
// race detector and tracer, then switches to g0 to tear the goroutine down.
func goexit1() {
	if raceenabled {
		// Synchronize with the goroutine's synctest bubble (if any) before
		// reporting the goroutine end to the race detector.
		if gp := getg(); gp.bubble != nil {
			racereleasemergeg(gp, gp.bubble.raceaddr())
		}
		racegoend()
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.GoEnd()
		traceRelease(trace)
	}
	mcall(goexit0)
}
4497
4498
// goexit0 runs on g0 to destroy the exiting goroutine gp and schedule the
// next one.
func goexit0(gp *g) {
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// The goroutine handled secret material: scrub its entire stack so
		// no secrets survive into the g free list.
		memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	}
	gdestroy(gp)
	schedule()
}
4510
// gdestroy tears down an exiting goroutine: it moves gp to _Gdead, clears
// its per-goroutine state, flushes any remaining GC assist credit, detaches
// it from this M, and returns it to the g free list. If gp was locked to
// this OS thread via an internal lock, the thread is abandoned (gogo to g0).
func gdestroy(gp *g) {
	mp := getg().m
	pp := mp.p.ptr()

	casgstatus(gp, _Grunning, _Gdead)
	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
	if isSystemGoroutine(gp, false) {
		sched.ngsys.Add(-1)
	}
	// Clear all fields that must not leak into gp's next life. lockedm is
	// sampled first so we can act on it after the reset below.
	gp.m = nil
	locked := gp.lockedm != 0
	gp.lockedm = 0
	mp.lockedg = 0
	gp.preemptStop = false
	gp.paniconfault = false
	gp._defer = nil // should be true already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = waitReasonZero
	gp.param = nil
	gp.labels = nil
	gp.timer = nil
	gp.bubble = nil
	gp.fipsOnlyBypass = false
	gp.secret = 0

	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
		// Flush assist credit to the global pool so it isn't lost with the
		// goroutine. Convert bytes of credit into scan work.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
		gcController.bgScanCredit.Add(scanCredit)
		gp.gcAssistBytes = 0
	}

	dropg()

	if GOARCH == "wasm" {
		// No OS-thread locking on wasm; nothing more to do.
		gfput(pp, gp)
		return
	}

	if locked && mp.lockedInt != 0 {
		// Exiting while holding an internal (runtime) thread lock is a
		// runtime bug — or a misuse of Goexit on a C-created thread.
		print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
		if mp.isextra {
			throw("runtime.Goexit called in a thread that was not created by the Go runtime")
		}
		throw("exited a goroutine internally locked to the OS thread")
	}
	gfput(pp, gp)
	if locked {
		// The goroutine was externally locked to this thread. The thread's
		// state may be polluted, so kill it rather than reuse it — except
		// on plan9, where threads are reused (clear lockedExt instead).
		if GOOS != "plan9" {
			gogo(&mp.g0.sched) // never returns
		} else {
			mp.lockedExt = 0
		}
	}
}
4578
4579
4580
4581
4582
4583
4584
4585
4586
// save records the current pc/sp/bp in gp.sched so the goroutine can be
// resumed later (used around syscall entry). It must only be called for the
// user g, never g0 or gsignal, whose sched buffers are used for other
// purposes.
func save(pc, sp, bp uintptr) {
	gp := getg()

	if gp == gp.m.g0 || gp == gp.m.gsignal {
		// m.g0.sched is special and must describe the content of the g0
		// stack; gsignal similarly. Saving user state over it would
		// corrupt thread switching.
		throw("save on system g not allowed")
	}

	gp.sched.pc = pc
	gp.sched.sp = sp
	gp.sched.lr = 0
	gp.sched.bp = bp

	// ctxt is left untouched (it's for closures); it must already be nil
	// here — a non-nil ctxt would need a write barrier, which is not
	// allowed in this context.
	if gp.sched.ctxt != nil {
		badctxt()
	}
}
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
// reentersyscall is the common syscall-entry path (entersyscall is a thin
// wrapper). It records the caller's pc/sp/bp so the goroutine can be traced
// and resumed, and transitions the g from _Grunning to _Gsyscall, leaving
// the P loosely attached (m.oldp) so it can be retaken by sysmon or
// reclaimed on exitsyscall.
//
// Several code paths below may grow or switch stacks (systemstack calls),
// which clobbers gp.sched — hence the repeated save(pc, sp, bp) calls to
// restore the saved syscall frame after each such excursion.
func reentersyscall(pc, sp, bp uintptr) {
	gp := getg()

	// Disable preemption for the duration: we hold invariants (e.g. an
	// inconsistent g status) that must not be observed mid-way.
	gp.m.locks++

	if goexperiment.RuntimeSecret {
		// Scrub secret material from the signal stack before entering the
		// kernel.
		eraseSecretsSignalStk()
	}

	// Entersyscall must not call any function that might split/grow the
	// stack. Catch calls that do by poisoning the stack guard.
	gp.stackguard0 = stackPreempt
	gp.throwsplit = true

	// Leave SP for traceback; record the P's syscalltick so exitsyscall can
	// tell whether the P was retaken while we were blocked.
	gp.m.syscalltick = gp.m.p.ptr().syscalltick

	pp := gp.m.p.ptr()
	if pp.runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack.
		systemstack(runSafePointFn)
	}
	gp.m.oldp.set(pp)

	save(pc, sp, bp)
	gp.syscallsp = sp
	gp.syscallpc = pc
	gp.syscallbp = bp

	// Sanity-check that the recorded frame lies within the g's stack.
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}
	trace := traceAcquire()
	if trace.ok() {
		systemstack(func() {
			trace.GoSysCall()
		})
		// systemstack itself clobbers g.sched; restore it.
		save(pc, sp, bp)
	}
	if sched.gcwaiting.Load() {
		// A stop-the-world is pending: hand our P to the stopper.
		systemstack(func() {
			entersyscallHandleGCWait(trace)
		})
		save(pc, sp, bp)
	}

	// Transition to _Gsyscall. The fast path is a plain CAS; fall back to
	// casgstatus when the g is in a synctest bubble or the CAS loses a race
	// (e.g. with a concurrent scan).
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
		casgstatus(gp, _Grunning, _Gsyscall)
	}
	if staticLockRanking {
		// casgstatus may call systemstack under static lock ranking.
		save(pc, sp, bp)
	}
	if trace.ok() {
		traceRelease(trace)
	}
	if sched.sysmonwait.Load() {
		systemstack(entersyscallWakeSysmon)
		save(pc, sp, bp)
	}
	gp.m.locks--
}
4749
4750
4751
4752
// debugExtendGrunningNoP, when true, inserts artificial delays (usleep) at
// the points where a g is _Grunning without a P (see entersyscallblock and
// exitsyscall), widening those windows to shake out latent races. Debug
// only; must be false in normal builds.
const debugExtendGrunningNoP = false
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
// entersyscall marks the calling goroutine as entering a (presumed short)
// system call. It captures the caller's pc/sp/fp here — they must be taken
// in this frame, not inside reentersyscall — and delegates the real work.
func entersyscall() {
	// N.B. getcallerfp cannot be written directly as an argument to
	// reentersyscall without it being evaluated in the wrong frame
	// (NOTE(review): original upstream comment stripped; confirm exact
	// rationale against runtime sources).
	fp := getcallerfp()
	reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
}
4777
// entersyscallWakeSysmon wakes a parked sysmon thread so it can watch over
// the syscall we are entering. Runs on the system stack.
func entersyscallWakeSysmon() {
	lock(&sched.lock)
	// Re-check under the lock; another thread may have woken sysmon already.
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}
4786
// entersyscallHandleGCWait hands the current M's P to a pending
// stop-the-world: it detaches the P, marks it _Pgcstop, and wakes the
// stopper if this was the last P it was waiting for. Runs on the system
// stack during syscall entry.
func entersyscallHandleGCWait(trace traceLocker) {
	gp := getg()

	lock(&sched.lock)
	if sched.stopwait > 0 {
		// Detach P from M in both directions before publishing _Pgcstop.
		pp := gp.m.p.ptr()
		pp.m = 0
		gp.m.p = 0
		atomic.Store(&pp.status, _Pgcstop)

		if trace.ok() {
			trace.ProcStop(pp)
		}
		// This M now has a g in syscall with no P; account for it.
		addGSyscallNoP(gp.m)
		pp.gcStopTime = nanotime()
		pp.syscalltick++
		if sched.stopwait--; sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
	}
	unlock(&sched.lock)
}
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
// entersyscallblock is like entersyscall but for calls known to block for a
// long time: instead of leaving the P loosely attached for sysmon to
// retake, it hands the P off immediately (handoffp) so other work can run.
func entersyscallblock() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	gp.throwsplit = true
	gp.stackguard0 = stackPreempt // see comment in entersyscall
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	gp.m.p.ptr().syscalltick++

	// We are about to give the P away; account for a syscall g with no P.
	addGSyscallNoP(gp.m)

	// Leave SP for traceback and resumption.
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	bp := getcallerfp()
	save(pc, sp, bp)
	gp.syscallsp = gp.sched.sp
	gp.syscallpc = gp.sched.pc
	gp.syscallbp = gp.sched.bp
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		// Copy values into locals before switching stacks: the closure
		// below runs on the system stack.
		sp1 := sp
		sp2 := gp.sched.sp
		sp3 := gp.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	// Hand off the P (emitting the syscall trace event under the same
	// trace acquisition) while still _Grunning.
	trace := traceAcquire()
	systemstack(func() {
		if trace.ok() {
			trace.GoSysCall()
		}
		handoffp(releasep())
	})

	// Debug aid: widen the window where this g is _Grunning without a P.
	if debugExtendGrunningNoP {
		usleep(10)
	}
	casgstatus(gp, _Grunning, _Gsyscall)
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if trace.ok() {
		systemstack(func() {
			traceRelease(trace)
		})
	}

	// systemstack calls above clobbered g.sched; resave the frame.
	save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())

	gp.m.locks--
}
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
// exitsyscall is the counterpart of entersyscall: it transitions the g from
// _Gsyscall back to _Grunning and tries to reacquire a P. Fast path: the M
// still holds its P (or can win back its old one / an idle one), in which
// case execution continues directly. Slow path: no P is available, so the
// goroutine is parked via exitsyscallNoP and rescheduled.
func exitsyscall() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	if sys.GetCallerSP() > gp.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}
	gp.waitsince = 0

	if sched.stopwait == freezeStopWait {
		// The world is being frozen for crash dumping; deadlock on purpose
		// so this M doesn't disturb the frozen state.
		systemstack(func() {
			lock(&deadlock)
			lock(&deadlock)
		})
	}

	// Transition back to _Grunning before reacquiring a P. Fast path is a
	// plain CAS; fall back to casgstatus for bubbled goroutines or when the
	// CAS loses a race (e.g. with a concurrent scan).
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
		casgstatus(gp, _Gsyscall, _Grunning)
	}

	// Debug aid: widen the window where this g is _Grunning without a P.
	if debugExtendGrunningNoP {
		usleep(10)
	}

	// oldp is the P we loosely left behind at entersyscall (may have been
	// retaken by sysmon in the meantime).
	oldp := gp.m.oldp.ptr()
	gp.m.oldp.set(nil)

	pp := gp.m.p.ptr()
	if pp != nil {
		// Still attached to a P; only tracing bookkeeping is needed.
		if trace := traceAcquire(); trace.ok() {
			systemstack(func() {
				// syscalltick unchanged means the P was never taken from
				// us; otherwise we must have stolen it back, and the trace
				// needs the full steal/start/exit/start sequence.
				if pp.syscalltick == gp.m.syscalltick {
					trace.GoSysExit(false)
				} else {
					trace.ProcSteal(pp)
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
				}
				traceRelease(trace)
			})
		}
	} else {
		// Try to re-win our old P or grab an idle one.
		systemstack(func() {
			if pp := exitsyscallTryGetP(oldp); pp != nil {
				// Attach first, then emit the trace events.
				acquirepNoTrace(pp)
				if trace := traceAcquire(); trace.ok() {
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
					traceRelease(trace)
				}
			}
		})
		pp = gp.m.p.ptr()
	}

	if pp != nil {
		// Fast path succeeded: we have a P and can keep running.
		if goroutineProfile.active {
			// Make sure gp appears in any in-flight goroutine profile
			// (requires the system stack because of write barriers).
			systemstack(func() {
				tryRecordGoroutineProfileWB(gp)
			})
		}

		// Garbage collector isn't running (since we're in a syscall-exit
		// fast path), so it's safe to bump syscalltick directly.
		pp.syscalltick++

		gp.syscallsp = 0
		gp.m.locks--
		if gp.preempt {
			// Restore the preemption request in case it was cleared.
			gp.stackguard0 = stackPreempt
		} else {
			// Otherwise restore the real stack guard poisoned at entry.
			gp.stackguard0 = gp.stack.lo + stackGuard
		}
		gp.throwsplit = false

		if sched.disable.user && !schedEnabled(gp) {
			// User scheduling is disabled; yield so schedule() can park us.
			Gosched()
		}
		return
	}

	gp.m.locks--

	// Slow path: no P. Park the goroutine and reschedule on g0.
	mcall(exitsyscallNoP)

	// The goroutine must not be re-scheduled up to this point — the frame
	// recorded by entersyscall is only now invalidated.
	gp.syscallsp = 0
	gp.m.p.ptr().syscalltick++
	gp.throwsplit = false
}
5058
5059
5060
5061
5062
5063
5064
// exitsyscallTryGetP attempts to find a P for a goroutine leaving a
// syscall: first by winning back the P it left behind at entry (oldp),
// then by taking any idle P. Returns the P to attach, or nil. On success
// the caller's "g in syscall without P" accounting is reversed here.
func exitsyscallTryGetP(oldp *p) *p {
	// Try to re-acquire the P we loosely parked at entersyscall.
	if oldp != nil {
		if thread, ok := setBlockOnExitSyscall(oldp); ok {
			// We raced with a thread trying to retake oldp and won: take
			// the P from it, fix accounting, and let it continue.
			thread.takeP()
			decGSyscallNoP(getg().m)
			thread.resume()
			return oldp
		}
	}

	// Otherwise, try to grab any idle P.
	if sched.pidle != 0 {
		lock(&sched.lock)
		pp, _ := pidleget(0)
		if pp != nil && sched.sysmonwait.Load() {
			sched.sysmonwait.Store(false)
			notewakeup(&sched.sysmonnote)
		}
		unlock(&sched.lock)
		if pp != nil {
			decGSyscallNoP(getg().m)
			return pp
		}
	}
	return nil
}
5092
5093
5094
5095
5096
5097
5098
// exitsyscallNoP is the slow path of exitsyscall, run on g0 via mcall when
// no P could be acquired. It makes gp runnable and either runs it on a
// freshly acquired idle P, queues it globally, or — for a locked goroutine —
// waits for its dedicated M. Never returns to the caller's frame directly;
// gp resumes inside exitsyscall after mcall.
func exitsyscallNoP(gp *g) {
	traceExitingSyscall()
	trace := traceAcquire()
	casgstatus(gp, _Grunning, _Grunnable)
	traceExitedSyscall()
	if trace.ok() {
		// The syscall-exit event must come after the status change above
		// (see the bracketing traceExitingSyscall/traceExitedSyscall calls).
		trace.GoSysExit(true)
		traceRelease(trace)
	}
	// gp is no longer "in syscall without a P".
	decGSyscallNoP(getg().m)
	dropg()
	lock(&sched.lock)
	var pp *p
	if schedEnabled(gp) {
		pp, _ = pidleget(0)
	}
	var locked bool
	if pp == nil {
		// No P to run on: queue globally. Sample lockedm under sched.lock;
		// it is what decides whether we must wait for gp's dedicated M.
		globrunqput(gp)
		locked = gp.lockedm != 0
	} else if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		execute(gp, false) // Never returns.
	}
	if locked {
		// gp is locked to some other M; park this M until gp's M hands gp
		// back, then run it.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}
5149
5150
5151
5152
5153
5154
5155
// addGSyscallNoP increments the count of goroutines that are sitting in a
// system call without an associated P. Extra Ms created by C code
// (isExtraInC) are excluded from the count — they are not Go-created
// syscall waiters (NOTE(review): exact accounting rationale lived in a
// stripped comment; confirm against upstream).
func addGSyscallNoP(mp *m) {
	if !mp.isExtraInC {
		sched.nGsyscallNoP.Add(1)
	}
}
5169
5170
5171
5172
5173
5174
5175
// decGSyscallNoP reverses addGSyscallNoP: it decrements the count of
// goroutines in a system call without a P, with the same exclusion for
// C-created extra Ms.
func decGSyscallNoP(mp *m) {
	if !mp.isExtraInC {
		sched.nGsyscallNoP.Add(-1)
	}
}
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
// syscall_runtime_BeforeFork prepares the runtime for a fork(2): it
// disables preemption, saves and blocks signals (so the child doesn't
// inherit a racing signal), and poisons the stack guard so any stack growth
// between here and AfterFork crashes loudly instead of corrupting state.
// Linked to package syscall (see the go:linkname convention used by these
// syscall_runtime_* hooks).
func syscall_runtime_BeforeFork() {
	gp := getg().m.curg

	// Block preemption for the fork window.
	gp.m.locks++
	sigsave(&gp.m.sigmask)
	sigblock(false)

	// stackFork in stackguard0 makes any function-prologue stack check
	// fail hard, catching unexpected stack growth during fork.
	gp.stackguard0 = stackFork
}
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
// syscall_runtime_AfterFork undoes BeforeFork in the parent process:
// restores the real stack guard, restores the signal mask, and re-enables
// preemption.
func syscall_runtime_AfterFork() {
	gp := getg().m.curg

	// Replace the stackFork poison with the normal guard.
	gp.stackguard0 = gp.stack.lo + stackGuard

	msigrestore(gp.m.sigmask)

	gp.m.locks--
}
5236
5237
5238
// inForkedChild is true while we are in the child process immediately after
// a fork, before exec. Consulted by signal-handling code (set/cleared in
// syscall_runtime_AfterForkInChild below).
var inForkedChild bool
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
// syscall_runtime_AfterForkInChild runs in the forked child (which is
// expected to exec shortly): it clears inherited signal handlers and
// restores the signal mask saved by BeforeFork. The inForkedChild flag
// brackets the window so signal code can tell it is running post-fork,
// pre-exec.
func syscall_runtime_AfterForkInChild() {
	// It's OK to change the global variable inForkedChild here: we are the
	// only goroutine running in the child (fork copies a single thread).
	inForkedChild = true

	clearSignalHandlers()

	// When we have the signal mask restored, any pending signal can be
	// delivered; handlers are cleared above so none will run Go code.
	msigrestore(getg().m.sigmask)

	inForkedChild = false
}
5275
5276
5277
5278
// pendingPreemptSignals counts preemption signals that have been sent but
// not yet handled. BeforeExec spins until it reaches zero on darwin/ios.
var pendingPreemptSignals atomic.Int32
5280
5281
5282
5283
// syscall_runtime_BeforeExec prepares for an exec(2): it takes execLock to
// serialize against thread creation, and on darwin/ios waits out any
// in-flight preemption signals, which would otherwise be delivered to the
// exec'd image.
func syscall_runtime_BeforeExec() {
	// Prevent thread creation during exec.
	execLock.lock()

	// On Darwin, wait for all pending preemption signals to be received
	// before exec'ing.
	if GOOS == "darwin" || GOOS == "ios" {
		for pendingPreemptSignals.Load() > 0 {
			osyield()
		}
	}
}
5296
5297
5298
5299
// syscall_runtime_AfterExec releases the lock taken by BeforeExec after an
// exec attempt completes (successfully or not).
func syscall_runtime_AfterExec() {
	execLock.unlock()
}
5303
5304
// malg allocates a new g. If stacksize >= 0 it also allocates a stack of at
// least that size (rounded up to a power of two, plus the system-reserved
// stackSystem bytes); a negative stacksize yields a g with no stack.
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		stacksize = round2(stackSystem + stacksize)
		// stackalloc must run on the system stack.
		systemstack(func() {
			newg.stack = stackalloc(uint32(stacksize))
			if valgrindenabled {
				newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
			}
		})
		newg.stackguard0 = newg.stack.lo + stackGuard
		// stackguard1 is set to ^0 for a user g; only g0/gsignal use it.
		newg.stackguard1 = ^uintptr(0)
		// Clear the bottom word of the stack: it is read as the stack
		// barrier/low watermark by stack-handling code.
		*(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
	}
	return newg
}
5323
5324
5325
5326
// newproc creates a new goroutine running fn and queues it for execution.
// This is the implementation of the go statement; the compiler turns
// "go f()" into a call to newproc.
func newproc(fn *funcval) {
	gp := getg()
	pc := sys.GetCallerPC()
	systemstack(func() {
		newg := newproc1(fn, gp, pc, false, waitReasonZero)

		// Put the new goroutine at the head (runnext) of our local queue.
		pp := getg().m.p.ptr()
		runqput(pp, newg, true)

		if mainStarted {
			wakep()
		}
	})
}
5341
5342
5343
5344
// newproc1 creates a new goroutine with state _Grunnable (or _Gwaiting
// with the given waitreason, when parked is true), starting at fn, called
// from callergp at callerpc. The caller is responsible for queuing the new
// g for execution. Must be called on the system stack.
func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
	if fn == nil {
		fatal("go of nil func value")
	}

	mp := acquirem() // disable preemption because we hold M and P in local vars.
	pp := mp.p.ptr()
	// Reuse a g from the free list if possible; otherwise allocate one with
	// the minimum stack and register it in allgs.
	newg := gfget(pp)
	if newg == nil {
		newg = malg(stackMin)
		casgstatus(newg, _Gidle, _Gdead)
		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	// Lay out the initial frame at the top of the new stack.
	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
	totalSize = alignUp(totalSize, sys.StackAlign)
	sp := newg.stack.hi - totalSize
	if usesLR {
		// caller's LR
		*(*uintptr)(unsafe.Pointer(sp)) = 0
		prepGoExitFrame(sp)
	}
	if GOARCH == "arm64" {
		// caller's FP
		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
	}

	// Initialize the scheduling context: the g "returns" into goexit when
	// fn finishes (gostartcallfn arranges fn as the resume point).
	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.parentGoid = callergp.goid
	newg.gopc = callerpc
	newg.ancestors = saveAncestors(callergp)
	newg.startpc = fn.fn
	newg.runningCleanups.Store(false)
	if isSystemGoroutine(newg, false) {
		sched.ngsys.Add(1)
	} else {
		// User goroutines inherit the synctest bubble and pprof labels of
		// their creator.
		newg.bubble = callergp.bubble
		if mp.curg != nil {
			newg.labels = mp.curg.labels
		}
		if goroutineProfile.active {
			// A concurrent goroutine profile is running. The new g cannot
			// have been profiled yet, so mark it satisfied so the profiler
			// does not wait on it.
			newg.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
	}
	// Track initial transition? (every gTrackingPeriod-th g is sampled)
	newg.trackingSeq = uint8(cheaprand())
	if newg.trackingSeq%gTrackingPeriod == 0 {
		newg.tracking = true
	}
	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))

	// Get a goid from this P's cache, refilling it from the global
	// generator in batches to avoid contention.
	trace := traceAcquire()
	var status uint32 = _Grunnable
	if parked {
		status = _Gwaiting
		newg.waitreason = waitreason
	}
	if pp.goidcache == pp.goidcacheend {
		// Sched.goidgen is the last allocated id; this batch must not be
		// zero, since goid 0 means "no id".
		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
		pp.goidcache -= _GoidCacheBatch - 1
		pp.goidcacheend = pp.goidcache + _GoidCacheBatch
	}
	newg.goid = pp.goidcache
	casgstatus(newg, _Gdead, status)
	pp.goidcache++
	newg.trace.reset()
	if trace.ok() {
		trace.GoCreate(newg, newg.startpc, parked)
		traceRelease(trace)
	}

	// New goroutines inherit the FIPS-only bypass and DIT settings of
	// their creator.
	newg.fipsOnlyBypass = callergp.fipsOnlyBypass

	newg.ditWanted = callergp.ditWanted

	if raceenabled {
		newg.racectx = racegostart(callerpc)
		newg.raceignore = 0
		if newg.labels != nil {
			// See note in proflabel.go on labelSync's role in synchronizing
			// with the reads in the signal handler.
			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
		}
	}
	pp.goroutinesCreated++
	releasem(mp)

	return newg
}
5460
5461
5462
5463
5464 func saveAncestors(callergp *g) *[]ancestorInfo {
5465
5466 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5467 return nil
5468 }
5469 var callerAncestors []ancestorInfo
5470 if callergp.ancestors != nil {
5471 callerAncestors = *callergp.ancestors
5472 }
5473 n := int32(len(callerAncestors)) + 1
5474 if n > debug.tracebackancestors {
5475 n = debug.tracebackancestors
5476 }
5477 ancestors := make([]ancestorInfo, n)
5478 copy(ancestors[1:], callerAncestors)
5479
5480 var pcs [tracebackInnerFrames]uintptr
5481 npcs := gcallers(callergp, 0, pcs[:])
5482 ipcs := make([]uintptr, npcs)
5483 copy(ipcs, pcs[:])
5484 ancestors[0] = ancestorInfo{
5485 pcs: ipcs,
5486 goid: callergp.goid,
5487 gopc: callergp.gopc,
5488 }
5489
5490 ancestorsp := new([]ancestorInfo)
5491 *ancestorsp = ancestors
5492 return ancestorsp
5493 }
5494
5495
5496
// gfput puts a dead goroutine on pp's local free list. Non-standard-size
// stacks are freed immediately (only startingStackSize stacks are cached).
// When the local list grows past 64, half of it is transferred to the
// global free lists, partitioned by whether the g still owns a stack.
func gfput(pp *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stack.hi - gp.stack.lo

	if stksize != uintptr(startingStackSize) {
		// non-standard stack size - free it.
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
		if valgrindenabled {
			valgrindDeregisterStack(gp.valgrindStackID)
			gp.valgrindStackID = 0
		}
	}

	pp.gFree.push(gp)
	if pp.gFree.size >= 64 {
		// Move half of the local list to the global lists, keeping the
		// with-stack / without-stack gs separate.
		var (
			stackQ   gQueue
			noStackQ gQueue
		)
		for pp.gFree.size >= 32 {
			gp := pp.gFree.pop()
			if gp.stack.lo == 0 {
				noStackQ.push(gp)
			} else {
				stackQ.push(gp)
			}
		}
		lock(&sched.gFree.lock)
		sched.gFree.noStack.pushAll(noStackQ)
		sched.gFree.stack.pushAll(stackQ)
		unlock(&sched.gFree.lock)
	}
}
5536
5537
5538
// gfget gets a goroutine from pp's local free list, refilling the local
// list from the global free lists when it is empty. The returned g is
// guaranteed to have a startingStackSize stack (allocated fresh if needed);
// returns nil if no free g is available.
func gfget(pp *p) *g {
retry:
	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
		lock(&sched.gFree.lock)
		// Move a batch of up to 32 gs from the global lists to the local
		// list, preferring gs that still own a stack.
		for pp.gFree.size < 32 {
			gp := sched.gFree.stack.pop()
			if gp == nil {
				gp = sched.gFree.noStack.pop()
				if gp == nil {
					break
				}
			}
			pp.gFree.push(gp)
		}
		unlock(&sched.gFree.lock)
		goto retry
	}
	gp := pp.gFree.pop()
	if gp == nil {
		return nil
	}
	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
		// Deallocate old stack. We kept it in gfput because it was the
		// right size when the goroutine was put on the free list, but the
		// right size has changed since then.
		systemstack(func() {
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stackguard0 = 0
			if valgrindenabled {
				valgrindDeregisterStack(gp.valgrindStackID)
				gp.valgrindStackID = 0
			}
		})
	}
	if gp.stack.lo == 0 {
		// Stack was deallocated in gfput or just above. Allocate a new one.
		systemstack(func() {
			gp.stack = stackalloc(startingStackSize)
			if valgrindenabled {
				gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
			}
		})
		gp.stackguard0 = gp.stack.lo + stackGuard
	} else {
		// The cached stack is being reused; tell the sanitizers.
		if raceenabled {
			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if msanenabled {
			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if asanenabled {
			asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
	}
	return gp
}
5599
5600
5601 func gfpurge(pp *p) {
5602 var (
5603 stackQ gQueue
5604 noStackQ gQueue
5605 )
5606 for !pp.gFree.empty() {
5607 gp := pp.gFree.pop()
5608 if gp.stack.lo == 0 {
5609 noStackQ.push(gp)
5610 } else {
5611 stackQ.push(gp)
5612 }
5613 }
5614 lock(&sched.gFree.lock)
5615 sched.gFree.noStack.pushAll(noStackQ)
5616 sched.gFree.stack.pushAll(stackQ)
5617 unlock(&sched.gFree.lock)
5618 }
5619
5620
// Breakpoint executes a breakpoint trap.
func Breakpoint() {
	breakpoint()
}
5624
5625
5626
5627
5628
5629
// dolockOSThread wires the current goroutine and its M to each other. The
// caller has already incremented the appropriate lock counter (lockedInt or
// lockedExt). No-op on wasm, which has no threads.
func dolockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	gp.m.lockedg.set(gp)
	gp.lockedm.set(gp.m)
}
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
5653
5654
// LockOSThread wires the calling goroutine to its current operating system
// thread (external/user variant, counted in lockedExt; calls nest). It also
// ensures the template thread exists, so that new threads can still be
// created even if this one's state becomes unusable for thread creation.
func LockOSThread() {
	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
		// If we need to start a new thread from a locked thread, we need
		// the template thread. Start it now while we're in a known-good
		// state. (plan9 has no template thread.)
		startTemplateThread()
	}
	gp := getg()
	gp.m.lockedExt++
	if gp.m.lockedExt == 0 {
		// Counter wrapped: too many nested LockOSThread calls.
		gp.m.lockedExt--
		panic("LockOSThread nesting overflow")
	}
	dolockOSThread()
}
5670
5671
// lockOSThread is the runtime-internal variant of LockOSThread, counted
// separately in lockedInt so internal locks don't interfere with user ones.
func lockOSThread() {
	getg().m.lockedInt++
	dolockOSThread()
}
5676
5677
5678
5679
5680
5681
// dounlockOSThread unwires the goroutine from its M, but only once both the
// internal and external lock counts have reached zero. No-op on wasm.
func dounlockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
		// Still locked for some other reason; keep the wiring.
		return
	}
	gp.m.lockedg = 0
	gp.lockedm = 0
}
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706
5707
// UnlockOSThread undoes one earlier LockOSThread call. Extra calls (with
// lockedExt already zero) are silently ignored.
func UnlockOSThread() {
	gp := getg()
	if gp.m.lockedExt == 0 {
		return
	}
	gp.m.lockedExt--
	dounlockOSThread()
}
5716
5717
// unlockOSThread undoes one earlier lockOSThread call (runtime-internal
// variant). Unbalanced calls are a runtime bug and crash.
func unlockOSThread() {
	gp := getg()
	if gp.m.lockedInt == 0 {
		systemstack(badunlockosthread)
	}
	gp.m.lockedInt--
	dounlockOSThread()
}
5726
// badunlockosthread reports an unbalanced lockOSThread/unlockOSThread pair.
func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
5730
// gcount returns the number of live goroutines: all gs ever created minus
// those on the global and per-P free lists, optionally excluding system
// goroutines. The per-P reads race with gfput/gfget, so the result is
// approximate; it is clamped to at least 1 (the caller itself is running).
func gcount(includeSys bool) int32 {
	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
	if !includeSys {
		n -= sched.ngsys.Load()
	}
	for _, pp := range allp {
		n -= pp.gFree.size
	}

	// All these variables can be changed concurrently, so the result can
	// be inconsistent. But at least the current goroutine is running.
	if n < 1 {
		n = 1
	}
	return n
}
5747
5748
5749
5750
5751
// goroutineleakcount returns the number of goroutines the GC's leak
// detection has flagged as leaked (see work.goroutineLeak).
func goroutineleakcount() int {
	return work.goroutineLeak.count
}
5755
// mcount returns the number of Ms: total ever created minus those freed.
func mcount() int32 {
	return int32(sched.mnext - sched.nmfreed)
}
5759
// prof holds the global CPU-profiling configuration shared between
// setcpuprofilerate and the SIGPROF handler (sigprof).
var prof struct {
	// signalLock is a hand-rolled spin lock (CAS 0<->1) guarding updates to
	// the profiler configuration against concurrent signal handlers.
	signalLock atomic.Uint32

	// hz is the current profiling rate in samples per second; 0 means
	// profiling is off.
	hz atomic.Int32
}
5767
// The following functions exist only so their entry PCs can be used as
// sentinel frames in CPU profiles (see the abi.FuncPCABIInternal uses in
// sigprof). They are never actually called; the self-calls in the bodies
// just keep the linker from discarding or inlining them away.
func _System()                    { _System() }
func _ExternalCode()              { _ExternalCode() }
func _LostExternalCode()          { _LostExternalCode() }
func _GC()                        { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _LostContendedRuntimeLock()  { _LostContendedRuntimeLock() }
func _VDSO()                      { _VDSO() }
5775
5776
5777
5778
5779
// sigprof is called by the SIGPROF handler to record a CPU profile sample.
// pc/sp/lr describe the interrupted context; gp is the interrupted g and mp
// its M. It collects a stack trace (handling cgo, libcall and VDSO frames
// specially), then feeds it to the CPU profiler and the execution tracer.
// Runs in signal context: it must not allocate or acquire normal locks.
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz.Load() == 0 {
		return
	}

	// If mp.profilehz is 0, then profiling is not enabled for this thread.
	// We must check this to avoid a deadlock between setcpuprofilerate
	// and this signal handler.
	if mp != nil && mp.profilehz == 0 {
		return
	}

	// On these architectures 64-bit atomics are implemented with locks or
	// restartable kernel helpers; profiling inside them is unsafe or would
	// misattribute. Count the sample as lost instead.
	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
		if f := findfunc(pc); f.valid() {
			if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
				cpuprof.lostAtomic++
				return
			}
		}
		if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
			// internal/runtime/atomic functions call into kernel helpers
			// at the top of the address space on linux/arm pre-v7.
			cpuprof.lostAtomic++
			return
		}
	}

	// Profiling runs concurrently with GC; block mallocs so heap-walking
	// code elsewhere can treat this M as "in allocation" and skip it.
	getg().m.mallocing++

	var u unwinder
	var stk [maxCPUProfStack]uintptr
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		cgoOff := 0
		// Check cgoCallersUse to make sure that we are not interrupting
		// other code writing to cgoCallers.
		if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
				cgoOff++
			}
			// Prepend the C frames, then mark the buffer consumed.
			n += copy(stk[:], mp.cgoCallers[:cgoOff])
			mp.cgoCallers[0] = 0
		}

		// Collect Go stack that leads to the cgo call.
		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
		// Libcall, i.e. runtime syscall on windows.
		// Collect Go stack that leads to the call.
		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
	} else if mp != nil && mp.vdsoSP != 0 {
		// VDSO call, e.g. nanotime on Linux.
		// Collect Go stack that leads to the call.
		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
	} else {
		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
	}
	n += tracebackPCs(&u, 0, stk[n:])

	if n <= 0 {
		// Normal traceback is impossible or has failed. Synthesize a
		// two-frame sample attributing the time to a sentinel function.
		n = 2
		if inVDSOPage(pc) {
			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
		} else if pc > firstmoduledata.etext {
			// "ExternalCode" is better than "etext".
			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
		}
		stk[0] = pc
		if mp.preemptoff != "" {
			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
		} else {
			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
		}
	}

	if prof.hz.Load() != 0 {
		// Note: it can happen on Windows that we interrupted a system
		// thread with no g, so gp could nil. Guard all the derefs below.
		var tagPtr *unsafe.Pointer
		if gp != nil && gp.m != nil && gp.m.curg != nil {
			tagPtr = &gp.m.curg.labels
		}
		cpuprof.add(tagPtr, stk[:n])

		// Attribute the sample to the user g (curg) rather than g0 when
		// possible, and pass along the M and P for the tracer.
		gprof := gp
		var mp *m
		var pp *p
		if gp != nil && gp.m != nil {
			if gp.m.curg != nil {
				gprof = gp.m.curg
			}
			mp = gp.m
			pp = gp.m.p.ptr()
		}
		traceCPUSample(gprof, mp, pp, stk[:n])
	}
	getg().m.mallocing--
}
5897
5898
5899
// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, profiling is turned off.
func setcpuprofilerate(hz int32) {
	// Normalize: negative means off.
	if hz < 0 {
		hz = 0
	}

	// Disable preemption (via m.locks) so we keep running on this
	// thread while reconfiguring its profiling timer.
	gp := getg()
	gp.m.locks++

	// Stop this thread's profiler before changing the global rate so a
	// late signal cannot observe an inconsistent state.
	setThreadCPUProfiler(0)

	// signalLock is a hand-rolled spinlock: it must be usable from
	// signal handlers, so an ordinary mutex would not be safe.
	for !prof.signalLock.CompareAndSwap(0, 1) {
		osyield()
	}
	if prof.hz.Load() != hz {
		setProcessCPUProfiler(hz)
		prof.hz.Store(hz)
	}
	prof.signalLock.Store(0)

	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	// Re-arm this thread's timer at the new rate.
	if hz != 0 {
		setThreadCPUProfiler(hz)
	}

	gp.m.locks--
}
5935
5936
5937
// init initializes pp, which may be a freshly allocated p or a p
// previously destroyed by procresize, giving it id and a gcstop status.
// Called only from procresize with the world stopped.
func (pp *p) init(id int32) {
	pp.id = id
	pp.gcw.id = id
	pp.status = _Pgcstop
	pp.sudogcache = pp.sudogbuf[:0]
	pp.deferpool = pp.deferpoolbuf[:0]
	pp.wbBuf.reset()
	if pp.mcache == nil {
		if id == 0 {
			// P 0 adopts the bootstrap mcache created before any P
			// existed; it must already be set up.
			if mcache0 == nil {
				throw("missing mcache?")
			}
			pp.mcache = mcache0
		} else {
			pp.mcache = allocmcache()
		}
	}
	if raceenabled && pp.raceprocctx == 0 {
		if id == 0 {
			// P 0 adopts the bootstrap race context; hand it over and
			// clear the global so it is not reused.
			pp.raceprocctx = raceprocctx0
			raceprocctx0 = 0
		} else {
			pp.raceprocctx = raceproccreate()
		}
	}
	lockInit(&pp.timers.mu, lockRankTimers)

	// A fresh P may acquire timers; mark it in timerpMask so other Ps
	// will check its timer heap.
	timerpMask.set(id)

	// The P is not on the idle list.
	idlepMask.clear(id)
}
5974
5975
5976
5977
5978
// destroy releases all of pp's resources and transitions it to _Pdead.
// Its runnable work is moved to the global run queue and its timers to
// the current P. sched.lock must be held and the world must be stopped.
func (pp *p) destroy() {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	// Move all runnable goroutines to the global queue, preserving
	// their relative order by draining from the tail to the head.
	for pp.runqhead != pp.runqtail {
		// Pop from tail of local queue.
		pp.runqtail--
		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
		// Push onto head of global queue.
		globrunqputhead(gp)
	}
	if pp.runnext != 0 {
		globrunqputhead(pp.runnext.ptr())
		pp.runnext = 0
	}

	// Move pp's timers to the current P's timer heap.
	getg().m.p.ptr().timers.take(&pp.timers)

	// Destroying a P mid-GC would strand its GC worker state; this
	// must only happen with GC off.
	if phase := gcphase; phase != _GCoff {
		println("runtime: p id", pp.id, "destroyed during GC phase", phase)
		throw("P destroyed while GC is running")
	}

	pp.gcw.spanq.destroy()

	// Drop cached sudogs, pinners, and defers so the GC can reclaim them.
	clear(pp.sudogbuf[:])
	pp.sudogcache = pp.sudogbuf[:0]
	pp.pinnerCache = nil
	clear(pp.deferpoolbuf[:])
	pp.deferpool = pp.deferpoolbuf[:0]
	systemstack(func() {
		// Return cached mspans to the heap's span allocator. Safe here
		// because the world is stopped, so nothing else touches mheap_.
		for i := 0; i < pp.mspancache.len; i++ {
			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
		}
		pp.mspancache.len = 0
		lock(&mheap_.lock)
		pp.pcache.flush(&mheap_.pages)
		unlock(&mheap_.lock)
	})
	freemcache(pp.mcache)
	pp.mcache = nil
	// Move pp's cached free Gs to the global cache.
	gfpurge(pp)
	if raceenabled {
		if pp.timers.raceCtx != 0 {
			// The race detector requires that a race context be ended
			// from a thread that owns the P, so temporarily wire pp to
			// the current M while ending the timer race context.
			mp := getg().m
			phold := mp.p.ptr()
			mp.p.set(pp)

			racectxend(pp.timers.raceCtx)
			pp.timers.raceCtx = 0

			mp.p.set(phold)
		}
		raceprocdestroy(pp.raceprocctx)
		pp.raceprocctx = 0
	}
	pp.gcAssistTime = 0
	// Fold per-P statistics into their global counters before the P
	// becomes unreachable.
	gcCleanups.queued += pp.cleanupsQueued
	pp.cleanupsQueued = 0
	sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
	pp.goroutinesCreated = 0
	pp.xRegs.free()
	pp.status = _Pdead
}
6053
6054
6055
6056
6057
6058
6059
6060
6061
// procresize changes the number of processors to nprocs, growing or
// shrinking allp, initializing new Ps and destroying excess ones. It
// returns the list of Ps with local work (linked through p.link) that
// the caller must schedule; remaining Ps are placed on the idle list.
// sched.lock must be held and the world must be stopped.
func procresize(nprocs int32) *p {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	old := gomaxprocs
	if old < 0 || nprocs <= 0 {
		throw("procresize: invalid arg")
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.Gomaxprocs(nprocs)
		traceRelease(trace)
	}

	// Account accumulated scheduler time under the old GOMAXPROCS.
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	// Grow allp and the per-P bitmasks if necessary. allpLock guards
	// concurrent readers that iterate allp without sched.lock.
	if nprocs > int32(len(allp)) {
		lock(&allpLock)
		if nprocs <= int32(cap(allp)) {
			allp = allp[:nprocs]
		} else {
			nallp := make([]*p, nprocs)
			// Copy everything up to cap so shrink-then-grow keeps
			// previously allocated Ps.
			copy(nallp, allp[:cap(allp)])
			allp = nallp
		}

		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// Initialize new Ps (reusing any kept from a previous shrink).
	for i := old; i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
		}
		pp.init(i)
		// Publish with an atomic store: lock-free readers walk allp.
		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
	}

	gp := getg()
	if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
		// Our current P survives the resize; keep running on it.
		gp.m.p.ptr().status = _Prunning
		gp.m.p.ptr().mcache.prepareForSweep()
	} else {
		// Our P is being destroyed (or we have none); release it and
		// take over allp[0], which always survives.
		if gp.m.p != 0 {
			trace := traceAcquire()
			if trace.ok() {
				// Record that we stopped on the old P for the tracer.
				trace.GoSched()
				trace.ProcStop(gp.m.p.ptr())
				traceRelease(trace)
			}
			gp.m.p.ptr().m = 0
		}
		gp.m.p = 0
		pp := allp[0]
		pp.m = 0
		pp.status = _Pidle
		acquirep(pp)
		trace := traceAcquire()
		if trace.ok() {
			trace.GoStart()
			traceRelease(trace)
		}
	}

	// The bootstrap mcache has been adopted by P 0 (in p.init); drop
	// the global reference.
	mcache0 = nil

	// Release resources of Ps beyond the new count.
	for i := nprocs; i < old; i++ {
		pp := allp[i]
		pp.destroy()
	}

	// Trim allp and the masks if we shrank.
	if int32(len(allp)) != nprocs {
		lock(&allpLock)
		allp = allp[:nprocs]
		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// Partition the remaining Ps: those with local work become
	// runnable, the rest idle. Iterate backwards so the returned list
	// ends up in ascending id order.
	var runnablePs *p
	var runnablePsNeedM *p
	var idlePs *p
	for i := nprocs - 1; i >= 0; i-- {
		pp := allp[i]
		if gp.m.p.ptr() == pp {
			// Skip the P we are running on.
			continue
		}
		pp.status = _Pidle
		if runqempty(pp) {
			pp.link.set(idlePs)
			idlePs = pp
			continue
		}
		// Prefer rebinding the P to the M it last ran on, for locality.
		var mp *m
		if oldm := pp.oldm.get(); oldm != nil {
			mp = mgetSpecific(oldm)
		}
		if mp == nil {
			// Old M unavailable; defer M assignment to the next loop.
			pp.link.set(runnablePsNeedM)
			runnablePsNeedM = pp
			continue
		}
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}

	// Assign arbitrary idle Ms to the runnable Ps that didn't get
	// their preferred M (mget may return nil; the caller handles it).
	for runnablePsNeedM != nil {
		pp := runnablePsNeedM
		runnablePsNeedM = pp.link.ptr()

		mp := mget()
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}

	// During the GC mark phase, hand waiting GC workers to otherwise
	// idle Ps so marking resumes as soon as the world restarts.
	if gcBlackenEnabled != 0 {
		for idlePs != nil {
			pp := idlePs

			ok, _ := gcController.assignWaitingGCWorker(pp, now)
			if !ok {
				// No more waiting workers.
				break
			}

			idlePs = pp.link.ptr()
			mp := mget()
			pp.m.set(mp)
			pp.link.set(runnablePs)
			runnablePs = pp
		}
	}

	// Everything still idle goes on the idle P list.
	for idlePs != nil {
		pp := idlePs
		idlePs = pp.link.ptr()
		pidleput(pp, now)
	}

	stealOrder.reset(uint32(nprocs))
	// Publish the new gomaxprocs atomically for lock-free readers.
	var int32p *int32 = &gomaxprocs
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	if old != nprocs {
		// Notify the GC CPU limiter that capacity changed.
		gcCPULimiter.resetCapacity(now, nprocs)
	}
	return runnablePs
}
6284
6285
6286
6287
6288
6289
6290
// acquirep associates pp with the current M and emits a ProcStart trace
// event. pp must be _Pidle and unowned.
func acquirep(pp *p) {
	// Do the actual wiring first; the tracer below requires a P.
	acquirepNoTrace(pp)

	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStart()
		traceRelease(trace)
	}
}
6302
6303
6304
6305
// acquirepNoTrace is acquirep without the ProcStart trace event, for
// callers that must emit their own trace events around the acquisition.
func acquirepNoTrace(pp *p) {
	wirep(pp)

	// Remember which M the P ran on so procresize can try to restore
	// the same pairing later (cache locality).
	pp.oldm = pp.m.ptr().self

	// The P may have been idle across a sweep phase change; make its
	// mcache consistent with the current sweep generation before use.
	pp.mcache.prepareForSweep()
}
6321
6322
6323
6324
6325
6326
6327
// wirep is the first step of acquirep: it associates the current M with
// pp and sets pp to _Prunning. Throws if the M already has a P or pp is
// not idle.
func wirep(pp *p) {
	gp := getg()

	if gp.m.p != 0 {
		// Throw from the system stack: we may be in a context where
		// growing the user stack is not safe.
		systemstack(func() {
			throw("wirep: already in go")
		})
	}
	if pp.m != 0 || pp.status != _Pidle {
		systemstack(func() {
			id := int64(0)
			if pp.m != 0 {
				id = pp.m.ptr().id
			}
			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
			throw("wirep: invalid p state")
		})
	}
	gp.m.p.set(pp)
	pp.m.set(gp.m)
	pp.status = _Prunning
}
6354
6355
// releasep dissociates the current M from its P, emitting a ProcStop
// trace event, and returns the released P.
func releasep() *p {
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStop(getg().m.p.ptr())
		traceRelease(trace)
	}
	return releasepNoTrace()
}
6364
6365
// releasepNoTrace is releasep without the ProcStop trace event, for
// callers that emit their own trace events.
func releasepNoTrace() *p {
	gp := getg()

	if gp.m.p == 0 {
		throw("releasep: invalid arg")
	}
	pp := gp.m.p.ptr()
	if pp.m.ptr() != gp.m || pp.status != _Prunning {
		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
		throw("releasep: invalid p state")
	}

	// Return any reserved GC mark worker so another P can claim it.
	gcController.releaseNextGCMarkWorker(pp)

	gp.m.p = 0
	pp.m = 0
	pp.status = _Pidle
	return pp
}
6386
// incidlelocked adjusts the count of locked Ms that are idle (waiting
// for their locked goroutine). When the count rises, the system may
// have deadlocked, so re-run the deadlock check.
func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}
6395
6396
6397
6398
// checkdead reports a fatal error if all goroutines are asleep with no
// possibility of being woken (deadlock). It is called with sched.lock
// held whenever an M goes idle; sched.lock may be released on the
// throwing paths.
func checkdead() {
	assertLockHeld(&sched.lock)

	// In -buildmode=c-shared or c-archive the host program may run
	// with no live goroutines; that is not a deadlock. (Wasm has no
	// threads, so the check still applies there.)
	if (islibrary || isarchive) && GOARCH != "wasm" {
		return
	}

	// During a panic the normal accounting is unreliable; let the
	// panic machinery finish rather than reporting a spurious deadlock.
	if panicking.Load() > 0 {
		return
	}

	// For non-cgo binaries that carry extra Ms for C-created threads,
	// one M may legitimately be running C code; allow one extra.
	var run0 int32
	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
		run0 = 1
	}

	// Ms actually running user code = all Ms minus the idle ones and
	// the system Ms (sysmon, template thread, ...).
	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
	if run > run0 {
		return
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
		unlock(&sched.lock)
		throw("checkdead: inconsistent counts")
	}

	// No Ms are running. Inspect goroutine states: any runnable or
	// running goroutine here means the scheduler lost track of work.
	grunning := 0
	forEachG(func(gp *g) {
		if isSystemGoroutine(gp, false) {
			return
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting,
			_Gpreempted:
			// Blocked but alive; might still be woken by a timer.
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			unlock(&sched.lock)
			throw("checkdead: runnable g")
		}
	})
	// No user goroutines at all: main must have called runtime.Goexit.
	if grunning == 0 {
		unlock(&sched.lock)
		fatal("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Under the testing fake clock, jump time forward to the next
	// timer and restart an M to fire it instead of deadlocking.
	if faketime != 0 {
		if when := timeSleepUntil(); when < maxWhen {
			faketime = when

			// Start an M to steal the timer.
			pp, _ := pidleget(faketime)
			if pp == nil {
				// There should always be a free P since nothing is
				// running.
				unlock(&sched.lock)
				throw("checkdead: no p for timer")
			}
			mp := mget()
			if mp == nil {
				// There should always be a free M since nothing is
				// running.
				unlock(&sched.lock)
				throw("checkdead: no m for timer")
			}
			// Mark it spinning so it looks for timers/work on wakeup.
			sched.nmspinning.Add(1)
			mp.spinning = true
			mp.nextp.set(pp)
			notewakeup(&mp.park)
			return
		}
	}

	// A pending timer on any P can still wake a goroutine: no deadlock.
	for _, pp := range allp {
		if len(pp.timers.heap) > 0 {
			return
		}
	}

	unlock(&sched.lock)
	fatal("all goroutines are asleep - deadlock!")
}
6502
6503
6504
6505
6506
6507
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If this much time passes without a GC, one is forced.
var forcegcperiod int64 = 2 * 60 * 1e9

// haveSysmon indicates whether there is sysmon thread support.
// There are no threads on wasm, so no sysmon.
const haveSysmon = GOARCH != "wasm"
6514
6515
6516
6517
// sysmon runs forever on a dedicated M, without a P, performing
// background monitoring: netpoll when no one else is polling, retaking
// Ps blocked in syscalls, preempting long-running goroutines, forcing
// periodic GCs, waking the scavenger, and emitting schedtrace output.
func sysmon() {
	lock(&sched.lock)
	sched.nmsys++
	// An M was just dedicated to sysmon; re-check for deadlock with
	// the updated system-M count.
	checkdead()
	unlock(&sched.lock)

	lastgomaxprocs := int64(0)
	lasttrace := int64(0)
	idle := 0 // consecutive cycles with nothing to do
	delay := uint32(0)

	for {
		// Poll adaptively: 20us when busy, doubling after 50 idle
		// cycles up to a 10ms ceiling.
		if idle == 0 {
			delay = 20
		} else if idle > 50 {
			delay *= 2
		}
		if delay > 10*1000 {
			delay = 10 * 1000
		}
		usleep(delay)

		// If the whole program is idle (all Ps idle, or GC is waiting
		// to stop the world), park sysmon until the next timer is due
		// (capped at half the force-GC period) rather than spinning.
		// Skipped when schedtrace is on, so traces keep flowing.
		now := nanotime()
		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
			lock(&sched.lock)
			// Re-check under the lock to avoid a lost wakeup.
			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
				syscallWake := false
				next := timeSleepUntil()
				if next > now {
					sched.sysmonwait.Store(true)
					unlock(&sched.lock)

					// Sleep until just before the next timer fires.
					sleep := forcegcperiod / 2
					if next-now < sleep {
						sleep = next - now
					}
					// Let the OS relax timer resolution for long
					// sleeps (saves power, e.g. on Windows).
					shouldRelax := sleep >= osRelaxMinNS
					if shouldRelax {
						osRelax(true)
					}
					syscallWake = notetsleep(&sched.sysmonnote, sleep)
					if shouldRelax {
						osRelax(false)
					}
					lock(&sched.lock)
					sched.sysmonwait.Store(false)
					noteclear(&sched.sysmonnote)
				}
				if syscallWake {
					// Woken explicitly (e.g. syscall exit): resume
					// fast polling.
					idle = 0
					delay = 20
				}
			}
			unlock(&sched.lock)
		}

		// sysmonlock serializes sysmon's actions against anyone who
		// wants to temporarily suspend it (e.g. trace start/stop).
		lock(&sched.sysmonlock)
		// Refresh now: the sleep above may have been long.
		now = nanotime()

		// Give the embedder a chance to poll (cgo callback hook).
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		// Poll the network if no one has polled in the last 10ms.
		lastpoll := sched.lastpoll.Load()
		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
			sched.lastpoll.CompareAndSwap(lastpoll, now)
			list, delta := netpoll(0) // non-blocking
			if !list.empty() {
				// Bracket injectglist with incidlelocked(-1)/(1) so a
				// transiently idle locked M does not trigger a bogus
				// deadlock report while we hand off work.
				incidlelocked(-1)
				injectglist(&list)
				incidlelocked(1)
				netpollAdjustWaiters(delta)
			}
		}

		// Re-derive default GOMAXPROCS at most once per second.
		if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
			sysmonUpdateGOMAXPROCS()
			lastgomaxprocs = now
		}
		if scavenger.sysmonWake.Load() != 0 {
			// Kick the scavenger awake if someone requested it.
			scavenger.wake()
		}
		// Retake Ps blocked in syscalls and preempt long-running Gs.
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}
		// Force a GC if none has run for forcegcperiod.
		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
			lock(&forcegc.lock)
			forcegc.idle.Store(false)
			var list gList
			list.push(forcegc.g)
			injectglist(&list)
			unlock(&forcegc.lock)
		}
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
		unlock(&sched.sysmonlock)
	}
}
6650
// sysmontick is sysmon's per-P observation record: the last scheduler
// and syscall tick values it saw for the P and when it saw them change,
// used by retake to detect Ps stuck in one goroutine or one syscall.
type sysmontick struct {
	schedtick   uint32 // last observed p.schedtick
	syscalltick uint32 // last observed p.syscalltick
	schedwhen   int64  // time schedtick was last observed to change
	syscallwhen int64  // time syscalltick was last observed to change
}

// forcePreemptNS is how long a goroutine may run before sysmon asks it
// to be preempted: 10ms.
const forcePreemptNS = 10 * 1000 * 1000
6661
// retake preempts goroutines that have run on one P for too long and
// retakes Ps that have been blocked in a syscall, handing them off so
// other work can run. It returns the number of Ps retaken.
func retake(now int64) uint32 {
	n := 0
	// allpLock prevents allp from changing underneath us. We drop and
	// reacquire it inside the loop, so allp may move between
	// iterations; indexing is therefore rechecked via len(allp).
	lock(&allpLock)
	for i := 0; i < len(allp); i++ {
		pp := allp[i]
		if pp == nil || atomic.Load(&pp.status) != _Prunning {
			// procresize may leave nil entries while growing; only
			// running Ps are candidates.
			continue
		}
		pd := &pp.sysmontick
		sysretake := false

		// Preempt the P's goroutine if it has kept the P for more
		// than forcePreemptNS without a scheduling event.
		schedt := int64(pp.schedtick)
		if int64(pd.schedtick) != schedt {
			// The P scheduled since our last look; restart the clock.
			pd.schedtick = uint32(schedt)
			pd.schedwhen = now
		} else if pd.schedwhen+forcePreemptNS <= now {
			preemptone(pp)
			// If the goroutine is in a syscall, preemptone cannot
			// reach it; fall through to the syscall retake path below.
			sysretake = true
		}

		// Drop allpLock: the syscall handling below takes other locks
		// and must not hold allpLock while doing so.
		unlock(&allpLock)

		// Pretend one more M is running so an otherwise-idle system
		// doesn't trip checkdead while we juggle the P.
		incidlelocked(-1)

		// Stop the thread from exiting its syscall (and reclaiming
		// its P) while we examine it.
		thread, ok := setBlockOnExitSyscall(pp)
		if !ok {
			// Not actually in a syscall anymore; nothing to retake.
			goto done
		}

		// If the syscall tick advanced, the P entered a new syscall
		// recently; restart its clock instead of retaking.
		if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
			pd.syscalltick = uint32(syst)
			pd.syscallwhen = now
			thread.resume()
			goto done
		}

		// Leave the P alone if it has no local work, some other M is
		// spinning or idle (so work won't starve), and the syscall is
		// still young (<10ms) — unless preemption demanded otherwise.
		if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
			thread.resume()
			goto done
		}

		// Take the P away from the syscalling thread and hand it off
		// so its queued work can run.
		thread.takeP()
		thread.resume()
		n++

		handoffp(pp)

		// handoffp may start an M which needs sched.lock, so the
		// incidlelocked(1) below must come after it.
	done:
		incidlelocked(1)
		lock(&allpLock)
	}
	unlock(&allpLock)
	return uint32(n)
}
6758
6759
6760
// syscallingThread describes a thread caught inside a syscall by
// setBlockOnExitSyscall: the goroutine, its M and P, and the goroutine
// status held in _Gscan (which blocks the thread from exiting the
// syscall until resume is called).
type syscallingThread struct {
	gp     *g
	mp     *m
	pp     *p
	status uint32 // gp's status before the _Gscan bit was set
}
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
6780
6781
// setBlockOnExitSyscall attempts to capture the thread currently
// running pp's goroutine inside a syscall, preventing it from exiting
// the syscall until resume is called on the returned handle. It
// returns ok=false if the goroutine is not (or no longer) in a
// syscall. Works by setting the _Gscan bit on the goroutine's status,
// which exitsyscall must wait for.
func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
	if pp.status != _Prunning {
		return syscallingThread{}, false
	}

	// Read m, then curg, then the status, re-validating afterwards:
	// each of these can change concurrently, and only the successful
	// CAS into _Gscan pins them.
	mp := pp.m.ptr()
	if mp == nil {
		// No thread attached; nothing to block.
		return syscallingThread{}, false
	}
	gp := mp.curg
	if gp == nil {
		// Thread is not running a user goroutine.
		return syscallingThread{}, false
	}
	status := readgstatus(gp) &^ _Gscan

	// _Gdeadextra covers extra Ms whose goroutine parked in C code;
	// they behave like a syscall for retaking purposes.
	if status != _Gsyscall && status != _Gdeadextra {
		// Not in a syscall.
		return syscallingThread{}, false
	}
	if !castogscanstatus(gp, status, status|_Gscan) {
		// Lost a race with a status change; give up.
		return syscallingThread{}, false
	}
	// Re-validate the m/p wiring now that the goroutine is pinned; if
	// it moved, undo the scan bit and report failure.
	if gp.m != mp || gp.m.p.ptr() != pp {
		casfrom_Gscanstatus(gp, status|_Gscan, status)
		return syscallingThread{}, false
	}
	return syscallingThread{gp, mp, pp, status}, true
}
6827
6828
6829
6830
6831
// gcstopP releases the captured thread's P into _Pgcstop on behalf of a
// stop-the-world, recording the stop time and crediting stopwait.
// sched.lock must be held.
func (s syscallingThread) gcstopP() {
	assertLockHeld(&sched.lock)

	s.releaseP(_Pgcstop)
	s.pp.gcStopTime = nanotime()
	sched.stopwait--
}
6839
6840
6841
// takeP steals the captured thread's P, leaving it in _Pidle so the
// caller can hand it off.
func (s syscallingThread) takeP() {
	s.releaseP(_Pidle)
}
6845
6846
6847
6848
// releaseP detaches the P from the captured syscalling thread and puts
// it into state (_Pidle or _Pgcstop), emitting a ProcSteal trace event
// and bumping syscalltick so the thread notices the loss on syscall exit.
func (s syscallingThread) releaseP(state uint32) {
	if state != _Pidle && state != _Pgcstop {
		throw("attempted to release P into a bad state")
	}
	trace := traceAcquire()
	s.pp.m = 0
	s.mp.p = 0
	// The status store publishes the steal; it must happen before the
	// trace event is released.
	atomic.Store(&s.pp.status, state)
	if trace.ok() {
		trace.ProcSteal(s.pp)
		traceRelease(trace)
	}
	// The thread is still in its syscall but now has no P.
	addGSyscallNoP(s.mp)
	s.pp.syscalltick++
}
6864
6865
// resume clears the _Gscan bit, allowing the captured thread to finish
// exiting its syscall. Must be called exactly once per successful
// setBlockOnExitSyscall.
func (s syscallingThread) resume() {
	casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
}
6869
6870
6871
6872
6873
6874
6875 func preemptall() bool {
6876 res := false
6877 for _, pp := range allp {
6878 if pp.status != _Prunning {
6879 continue
6880 }
6881 if preemptone(pp) {
6882 res = true
6883 }
6884 }
6885 return res
6886 }
6887
6888
6889
6890
6891
6892
6893
6894
6895
6896
6897
// preemptone requests preemption of the goroutine currently running on
// pp. Best-effort: it may ask the wrong goroutine or none at all, since
// the target can change concurrently. Returns true if a request was
// issued.
func preemptone(pp *p) bool {
	mp := pp.m.ptr()
	// Cannot preempt a P with no thread, or ourselves.
	if mp == nil || mp == getg().m {
		return false
	}
	gp := mp.curg
	// No user goroutine, or the thread is on its system stack.
	if gp == nil || gp == mp.g0 {
		return false
	}
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// In a syscall there is nothing running on the CPU to preempt.
		return false
	}

	gp.preempt = true

	// Every call into the scheduler compares the stack pointer against
	// stackguard0; poisoning it to stackPreempt makes the next function
	// prologue divert into the scheduler (cooperative preemption).
	gp.stackguard0 = stackPreempt

	// Additionally request an async (signal-based) preemption, which
	// can interrupt tight loops with no function calls.
	if preemptMSupported && debug.asyncpreemptoff == 0 {
		pp.preempt = true
		preemptM(mp)
	}

	return true
}
6928
// starttime is the baseline for schedtrace timestamps, set on first use.
var starttime int64
6930
// schedtrace prints a one-shot summary of scheduler state (GODEBUG
// schedtrace/scheddetail). With detailed set it also dumps per-M and
// per-G state. Holds sched.lock for the duration, so output is a
// consistent snapshot.
func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
	}

	// Per-P state. In compact mode, print run-queue lengths as a
	// bracketed vector: [ q0 q1 ... ].
	for i, pp := range allp {
		h := atomic.Load(&pp.runqhead)
		t := atomic.Load(&pp.runqtail)
		if detailed {
			print("  P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
			mp := pp.m.ptr()
			if mp != nil {
				print(mp.id)
			} else {
				print("nil")
			}
			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
		} else {
			print(" ")
			if i == 0 {
				print("[ ")
			}
			print(t - h)
			if i == len(allp)-1 {
				print(" ]")
			}
		}
	}

	// Compact mode also prints each P's schedtick vector.
	if !detailed {
		print(" schedticks=[ ")
		for _, pp := range allp {
			print(pp.schedtick)
			print(" ")
		}
		print("]\n")
	}

	if !detailed {
		unlock(&sched.lock)
		return
	}

	// Per-M state.
	for mp := allm; mp != nil; mp = mp.alllink {
		pp := mp.p.ptr()
		print("  M", mp.id, ": p=")
		if pp != nil {
			print(pp.id)
		} else {
			print("nil")
		}
		print(" curg=")
		if mp.curg != nil {
			print(mp.curg.goid)
		} else {
			print("nil")
		}
		print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
		if lockedg := mp.lockedg.ptr(); lockedg != nil {
			print(lockedg.goid)
		} else {
			print("nil")
		}
		print("\n")
	}

	// Per-G state.
	forEachG(func(gp *g) {
		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
		if gp.m != nil {
			print(gp.m.id)
		} else {
			print("nil")
		}
		print(" lockedm=")
		if lockedm := gp.lockedm.ptr(); lockedm != nil {
			print(lockedm.id)
		} else {
			print("nil")
		}
		print("\n")
	})
	unlock(&sched.lock)
}
7026
// updateMaxProcsGState holds the state of the background goroutine that
// applies automatic GOMAXPROCS updates requested by sysmon.
type updateMaxProcsGState struct {
	lock mutex
	g    *g          // the updater goroutine, parked between requests
	idle atomic.Bool // true while the goroutine is parked

	// procs is the new GOMAXPROCS value to apply; written by sysmon
	// under lock before waking the goroutine.
	procs int32
}
7035
var (
	// updatemaxprocs counts non-default uses of the updatemaxprocs
	// GODEBUG setting, for runtime/metrics reporting.
	updatemaxprocs = &godebugInc{name: "updatemaxprocs"}

	// updateMaxProcsG is the state shared between sysmon and the
	// goroutine that applies automatic GOMAXPROCS updates.
	updateMaxProcsG updateMaxProcsGState

	// computeMaxProcsLock serializes computation of the default
	// GOMAXPROCS value (defaultGOMAXPROCS) so concurrent callers do
	// not race on the underlying OS queries.
	computeMaxProcsLock mutex
)
7094
7095
7096
7097
// defaultGOMAXPROCSUpdateEnable starts the background machinery that
// keeps GOMAXPROCS in sync with its computed default (e.g. when CPU
// affinity or cgroup limits change), unless disabled via
// GODEBUG=updatemaxprocs=0.
func defaultGOMAXPROCSUpdateEnable() {
	if debug.updatemaxprocs == 0 {
		// Updates disabled: record the non-default GODEBUG use for
		// metrics and do not start the updater goroutine.
		updatemaxprocs.IncNonDefault()
		return
	}

	go updateMaxProcsGoroutine()
}
7117
// updateMaxProcsGoroutine runs in the background, parked until sysmon
// detects that the default GOMAXPROCS value changed, then applies the
// new value with a stop-the-world. It exits permanently if the user has
// set GOMAXPROCS explicitly (customGOMAXPROCS).
func updateMaxProcsGoroutine() {
	updateMaxProcsG.g = getg()
	lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
	for {
		// Park until sysmon publishes a new target in
		// updateMaxProcsG.procs and clears idle.
		lock(&updateMaxProcsG.lock)
		if updateMaxProcsG.idle.Load() {
			throw("updateMaxProcsGoroutine: phase error")
		}
		updateMaxProcsG.idle.Store(true)
		goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)

		stw := stopTheWorldGC(stwGOMAXPROCS)

		// The user may have called runtime.GOMAXPROCS between our
		// wakeup and the world stop; if so, their setting wins and
		// automatic updates cease for good.
		lock(&sched.lock)
		custom := sched.customGOMAXPROCS
		unlock(&sched.lock)
		if custom {
			startTheWorldGC(stw)
			return
		}

		// newprocs is consumed by startTheWorld, which performs the
		// actual procresize.
		newprocs = updateMaxProcsG.procs
		lock(&sched.lock)
		sched.customGOMAXPROCS = false
		unlock(&sched.lock)

		startTheWorldGC(stw)
	}
}
7153
// sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS and, if it
// differs from the current value and the user has not set GOMAXPROCS
// explicitly, wakes updateMaxProcsGoroutine to apply it. Called from
// sysmon at most once per second.
func sysmonUpdateGOMAXPROCS() {
	// Serialize default-GOMAXPROCS computation with other callers.
	lock(&computeMaxProcsLock)

	// Snapshot current settings under sched.lock.
	lock(&sched.lock)
	custom := sched.customGOMAXPROCS
	curr := gomaxprocs
	unlock(&sched.lock)
	if custom {
		// User setting wins; nothing to do.
		unlock(&computeMaxProcsLock)
		return
	}

	// defaultGOMAXPROCS can be slow (cgroup/affinity queries), so it
	// runs outside sched.lock.
	procs := defaultGOMAXPROCS(0)
	unlock(&computeMaxProcsLock)
	if procs == curr {
		// No change.
		return
	}

	// Only hand off if the updater goroutine is parked; if it is
	// already running, this update is simply skipped until the next
	// sysmon pass.
	if updateMaxProcsG.idle.Load() {
		lock(&updateMaxProcsG.lock)
		updateMaxProcsG.procs = procs
		updateMaxProcsG.idle.Store(false)
		var list gList
		list.push(updateMaxProcsG.g)
		injectglist(&list)
		unlock(&updateMaxProcsG.lock)
	}
}
7189
7190
7191
7192
7193
7194
// schedEnableUser enables or disables the scheduling of user
// goroutines (used while the runtime runs only system goroutines, e.g.
// during certain GC phases). Disabled user goroutines accumulate in
// sched.disable.runnable; enabling flushes them to the global run
// queue and starts Ms for them.
func schedEnableUser(enable bool) {
	lock(&sched.lock)
	if sched.disable.user == !enable {
		// Already in the requested state.
		unlock(&sched.lock)
		return
	}
	sched.disable.user = !enable
	if enable {
		n := sched.disable.runnable.size
		globrunqputbatch(&sched.disable.runnable)
		unlock(&sched.lock)
		// Start up to n Ms, bounded by available idle Ps.
		for ; n != 0 && sched.npidle.Load() != 0; n-- {
			startm(nil, false, false)
		}
	} else {
		unlock(&sched.lock)
	}
}
7213
7214
7215
7216
7217
7218 func schedEnabled(gp *g) bool {
7219 assertLockHeld(&sched.lock)
7220
7221 if sched.disable.user {
7222 return isSystemGoroutine(gp, true)
7223 }
7224 return true
7225 }
7226
7227
7228
7229
7230
7231
// mput puts mp on the idle M list. sched.lock must be held. The newly
// idle M may complete a deadlock, so re-run checkdead.
func mput(mp *m) {
	assertLockHeld(&sched.lock)

	sched.midle.push(unsafe.Pointer(mp))
	sched.nmidle++
	checkdead()
}
7239
7240
7241
7242
7243
7244
// mget removes and returns an M from the idle list, or nil if the list
// is empty. sched.lock must be held.
func mget() *m {
	assertLockHeld(&sched.lock)

	mp := (*m)(sched.midle.pop())
	if mp != nil {
		sched.nmidle--
	}
	return mp
}
7254
7255
7256
7257
7258
7259
7260
7261
// mgetSpecific removes the specific M mp from the idle list, returning
// nil if mp is not currently idle. sched.lock must be held.
func mgetSpecific(mp *m) *m {
	assertLockHeld(&sched.lock)

	// An M on the idle list has at least one non-zero link; zero links
	// on both sides mean it is not on the list.
	if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
		return nil
	}

	sched.midle.remove(unsafe.Pointer(mp))
	sched.nmidle--

	return mp
}
7275
7276
7277
7278
7279
7280
// globrunqput puts gp at the tail of the global runnable queue.
// sched.lock must be held.
func globrunqput(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBack(gp)
}
7286
7287
7288
7289
7290
7291
// globrunqputhead puts gp at the head of the global runnable queue.
// sched.lock must be held.
func globrunqputhead(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.push(gp)
}
7297
7298
7299
7300
7301
7302
7303
// globrunqputbatch appends an entire queue of goroutines to the tail of
// the global runnable queue, emptying batch. sched.lock must be held.
func globrunqputbatch(batch *gQueue) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBackAll(*batch)
	*batch = gQueue{}
}
7310
7311
7312
7313 func globrunqget() *g {
7314 assertLockHeld(&sched.lock)
7315
7316 if sched.runq.size == 0 {
7317 return nil
7318 }
7319
7320 return sched.runq.pop()
7321 }
7322
7323
7324
// globrunqgetbatch removes up to n goroutines from the head of the
// global runnable queue: the first is returned as gp and the rest in q.
// The batch size is additionally capped to an even share of the queue
// (size/gomaxprocs+1) so one P cannot drain it all. sched.lock must be
// held.
func globrunqgetbatch(n int32) (gp *g, q gQueue) {
	assertLockHeld(&sched.lock)

	if sched.runq.size == 0 {
		return
	}

	n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)

	// First goroutine is handed back directly; the rest are queued.
	gp = sched.runq.pop()
	n--

	for ; n > 0; n-- {
		gp1 := sched.runq.pop()
		q.pushBack(gp1)
	}
	return
}
7343
7344
// pMask is an atomic bitmask with one bit per P, indexed by p.id.
type pMask []uint32
7346
7347
7348 func (p pMask) read(id uint32) bool {
7349 word := id / 32
7350 mask := uint32(1) << (id % 32)
7351 return (atomic.Load(&p[word]) & mask) != 0
7352 }
7353
7354
7355 func (p pMask) set(id int32) {
7356 word := id / 32
7357 mask := uint32(1) << (id % 32)
7358 atomic.Or(&p[word], mask)
7359 }
7360
7361
7362 func (p pMask) clear(id int32) {
7363 word := id / 32
7364 mask := uint32(1) << (id % 32)
7365 atomic.And(&p[word], ^mask)
7366 }
7367
7368
7369 func (p pMask) any() bool {
7370 for i := range p {
7371 if atomic.Load(&p[i]) != 0 {
7372 return true
7373 }
7374 }
7375 return false
7376 }
7377
7378
7379
7380
7381
7382 func (p pMask) resize(nprocs int32) pMask {
7383 maskWords := (nprocs + 31) / 32
7384
7385 if maskWords <= int32(cap(p)) {
7386 return p[:maskWords]
7387 }
7388 newMask := make([]uint32, maskWords)
7389
7390 copy(newMask, p)
7391 return newMask
7392 }
7393
7394
7395
7396
7397
7398
7399
7400
7401
7402
7403
7404
// pidleput puts pp on the idle P list and starts tracking its idle time
// for the GC CPU limiter. pp must have an empty run queue. now is the
// current time, or 0 to have pidleput read the clock; the (possibly
// updated) time is returned. sched.lock must be held.
func pidleput(pp *p, now int64) int64 {
	assertLockHeld(&sched.lock)

	if !runqempty(pp) {
		throw("pidleput: P has non-empty run queue")
	}
	if now == 0 {
		now = nanotime()
	}
	// An idle P with no timers need not be checked by timer stealers.
	if pp.timers.len.Load() == 0 {
		timerpMask.clear(pp.id)
	}
	idlepMask.set(pp.id)
	pp.link = sched.pidle
	sched.pidle.set(pp)
	sched.npidle.Add(1)
	if !pp.limiterEvent.start(limiterEventIdle, now) {
		throw("must be able to track idle limiter event")
	}
	return now
}
7426
7427
7428
7429
7430
7431
7432
7433
// pidleget removes a P from the idle list, or returns nil if the list
// is empty. now is the current time or 0 (read lazily only when a P is
// actually taken); the (possibly updated) time is returned. sched.lock
// must be held.
func pidleget(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp := sched.pidle.ptr()
	if pp != nil {
		if now == 0 {
			now = nanotime()
		}
		// The P may acquire timers again; conservatively mark it as a
		// timer owner and clear its idle bit.
		timerpMask.set(pp.id)
		idlepMask.clear(pp.id)
		sched.pidle = pp.link
		sched.npidle.Add(-1)
		pp.limiterEvent.stop(limiterEventIdle, now)
	}
	return pp, now
}
7451
7452
7453
7454
7455
7456
7457
7458
7459
7460
7461
// pidlegetSpinning is pidleget for callers that are about to spin up an
// M. On failure it sets sched.needspinning so the next P that becomes
// available knows a spinning M is wanted (avoids a missed-wakeup race).
// sched.lock must be held.
func pidlegetSpinning(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp, now := pidleget(now)
	if pp == nil {
		// No idle P; leave a note so the next releasing P wakes us.
		sched.needspinning.Store(1)
		return nil, now
	}

	return pp, now
}
7476
7477
7478
// runqempty reports whether pp has no goroutines in its local run queue
// or runnext slot. It is race-free against concurrent runqput/runqget:
// the tail is re-read after loading runnext, and the check only
// succeeds when the tail did not move, ruling out the window where a
// goroutine is moved from runnext into the queue (or vice versa).
func runqempty(pp *p) bool {
	for {
		head := atomic.Load(&pp.runqhead)
		tail := atomic.Load(&pp.runqtail)
		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
		if tail == atomic.Load(&pp.runqtail) {
			return head == tail && runnext == 0
		}
	}
}
7493
7494
7495
7496
7497
7498
7499
7500
7501
7502
// randomizeScheduler, when true (race builds), randomizes run-queue
// ordering to shake out scheduling-order-dependent bugs.
const randomizeScheduler = raceenabled
7504
7505
7506
7507
7508
7509
// runqput tries to put gp on the local runnable queue of pp. If next is
// true, gp goes into the runnext slot (to run immediately after the
// current goroutine); whatever was in runnext is demoted to the queue.
// If the local queue is full, half of it plus gp are moved to the
// global queue. Executed only by the owner P.
func runqput(pp *p, gp *g, next bool) {
	if !haveSysmon && next {
		// Without sysmon there is no preemption watchdog: a runnext
		// goroutine that repeatedly readies its partner could starve
		// the rest of the queue forever. Disable runnext entirely.
		next = false
	}
	// In race builds, randomly ignore the runnext hint half the time.
	if randomizeScheduler && next && randn(2) == 0 {
		next = false
	}

	if next {
	retryNext:
		oldnext := pp.runnext
		// CAS because runnext can be stolen by other Ps concurrently.
		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Demote the displaced goroutine into the regular queue.
		gp = oldnext.ptr()
	}

retry:
	// load-acquire pairs with the CAS in consumers (stealers).
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	if t-h < uint32(len(pp.runq)) {
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		// store-release makes the slot visible before the new tail.
		atomic.StoreRel(&pp.runqtail, t+1)
		return
	}
	if runqputslow(pp, gp, h, t) {
		return
	}
	// Queue is not full now (runqputslow raced with a consumer); retry.
	goto retry
}
7553
7554
7555
// runqputslow handles the full-local-queue case of runqput: it moves
// half of pp's queue plus gp to the global queue in one batch. Returns
// false if the head moved concurrently (queue no longer full) and the
// caller should retry the fast path.
func runqputslow(pp *p, gp *g, h, t uint32) bool {
	var batch [len(pp.runq)/2 + 1]*g

	// First grab half the queue locally...
	n := t - h
	n = n / 2
	if n != uint32(len(pp.runq)/2) {
		// Caller guarantees the queue was full.
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
	}
	// ...then commit the consumption with a CAS on the head
	// (store-release so consumers see consistent slots).
	if !atomic.CasRel(&pp.runqhead, h, h+n) {
		return false
	}
	batch[n] = gp

	if randomizeScheduler {
		// Fisher-Yates shuffle of the batch in race builds.
		for i := uint32(1); i <= n; i++ {
			j := cheaprandn(i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the goroutines into a list to hand to the global queue.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}

	q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}

	// One lock acquisition for the whole batch.
	lock(&sched.lock)
	globrunqputbatch(&q)
	unlock(&sched.lock)
	return true
}
7593
7594
7595
7596
7597 func runqputbatch(pp *p, q *gQueue) {
7598 if q.empty() {
7599 return
7600 }
7601 h := atomic.LoadAcq(&pp.runqhead)
7602 t := pp.runqtail
7603 n := uint32(0)
7604 for !q.empty() && t-h < uint32(len(pp.runq)) {
7605 gp := q.pop()
7606 pp.runq[t%uint32(len(pp.runq))].set(gp)
7607 t++
7608 n++
7609 }
7610
7611 if randomizeScheduler {
7612 off := func(o uint32) uint32 {
7613 return (pp.runqtail + o) % uint32(len(pp.runq))
7614 }
7615 for i := uint32(1); i < n; i++ {
7616 j := cheaprandn(i + 1)
7617 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7618 }
7619 }
7620
7621 atomic.StoreRel(&pp.runqtail, t)
7622
7623 return
7624 }
7625
7626
7627
7628
7629
// runqget gets a goroutine to execute from the local runnable queue of
// pp, preferring the runnext slot. inheritTime is true if the returned
// goroutine should inherit the remainder of the current time slice
// (runnext case). Executed only by the owner P.
func runqget(pp *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	next := pp.runnext
	// The cas can fail only if runnext was stolen by another P (the
	// owner never writes it concurrently with this read), so a single
	// attempt suffices; on failure runnext is simply gone.
	if next != 0 && pp.runnext.cas(next, 0) {
		return next.ptr(), true
	}

	for {
		// load-acquire pairs with producers' store-release of the tail.
		h := atomic.LoadAcq(&pp.runqhead)
		t := pp.runqtail
		if t == h {
			return nil, false
		}
		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
		// CAS the head to consume; retry on contention with stealers.
		if atomic.CasRel(&pp.runqhead, h, h+1) {
			return gp, false
		}
	}
}
7652
7653
7654
// runqdrain removes all goroutines from pp's local runnable queue
// (including runnext) and returns them as a queue. Executed only by the
// owner P.
func runqdrain(pp *p) (drainQ gQueue) {
	// Take runnext first; a failed cas means a stealer got it.
	oldNext := pp.runnext
	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
		drainQ.pushBack(oldNext.ptr())
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	qn := t - h
	if qn == 0 {
		return
	}
	if qn > uint32(len(pp.runq)) {
		// Inconsistent head/tail snapshot (read tore against a
		// concurrent stealer); re-read.
		goto retry
	}

	// Claim the whole range in one CAS; retry if stealers moved head.
	if !atomic.CasRel(&pp.runqhead, h, h+qn) {
		goto retry
	}

	// Slots [h, h+qn) now belong to us exclusively: stealers always
	// claim via the head CAS before reading, so no one else can read
	// these entries anymore.
	for i := uint32(0); i < qn; i++ {
		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
		drainQ.pushBack(gp)
	}
	return
}
7689
7690
7691
7692
7693
// runqgrab steals roughly half of the elements from pp's local runnable
// queue into batch, starting at batchHead (batch is a ring buffer). If
// the queue is empty and stealRunNextG is set, it may also steal pp's
// runnext. Returns the number of elements grabbed. Called by Ps other
// than the owner.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		// load-acquire both ends; pairs with the owner's releases.
		h := atomic.LoadAcq(&pp.runqhead)
		t := atomic.LoadAcq(&pp.runqtail)
		n := t - h
		n = n - n/2 // take the larger half (rounds up)
		if n == 0 {
			if stealRunNextG {
				// The queue proper is empty; try the runnext slot.
				if next := pp.runnext; next != 0 {
					if pp.status == _Prunning {
						if mp := pp.m.ptr(); mp != nil {
							if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
								// The owner is actively running (not
								// parked in a syscall) and likely about
								// to consume its runnext. Back off
								// briefly so we don't yank a goroutine
								// the owner would run immediately —
								// stealing here causes ping-ponging.
								// Sleep at least as long as the owner's
								// expected remaining work; on OSes with
								// low-resolution timers usleep would
								// oversleep badly, so just yield there.
								if !osHasLowResTimer {
									usleep(3)
								} else {
									osyield()
								}
							}
						}
					}
					if !pp.runnext.cas(next, 0) {
						// Lost the race for runnext; re-observe.
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(pp.runq)/2) {
			// Torn read of h and t (owner moved them between our two
			// loads); retry with a fresh snapshot.
			continue
		}
		// Copy candidates out first, then commit with the head CAS;
		// if the CAS fails the copies are simply discarded.
		for i := uint32(0); i < n; i++ {
			g := pp.runq[(h+i)%uint32(len(pp.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if atomic.CasRel(&pp.runqhead, h, h+n) {
			return n
		}
	}
}
7758
7759
7760
7761
// runqsteal steals half of the elements from the local runnable queue
// of p2 and puts them onto the local runnable queue of pp.
// Returns one of the stolen elements (or nil if the steal failed).
func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
	t := pp.runqtail
	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	n--
	// Return the last grabbed G directly instead of queuing it.
	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
	if n == 0 {
		// Only one G was grabbed; nothing to publish in the queue.
		return gp
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(pp.runq)) {
		throw("runqsteal: runq overflow")
	}
	atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the items available for consumption
	return gp
}
7780
7781
7782
// A gQueue is a FIFO queue of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.
type gQueue struct {
	head guintptr // first G, or 0 if the queue is empty
	tail guintptr // last G, or 0 if the queue is empty
	size int32    // number of Gs in the queue
}
7788
7789
7790 func (q *gQueue) empty() bool {
7791 return q.head == 0
7792 }
7793
7794
7795 func (q *gQueue) push(gp *g) {
7796 gp.schedlink = q.head
7797 q.head.set(gp)
7798 if q.tail == 0 {
7799 q.tail.set(gp)
7800 }
7801 q.size++
7802 }
7803
7804
7805 func (q *gQueue) pushBack(gp *g) {
7806 gp.schedlink = 0
7807 if q.tail != 0 {
7808 q.tail.ptr().schedlink.set(gp)
7809 } else {
7810 q.head.set(gp)
7811 }
7812 q.tail.set(gp)
7813 q.size++
7814 }
7815
7816
7817
7818 func (q *gQueue) pushBackAll(q2 gQueue) {
7819 if q2.tail == 0 {
7820 return
7821 }
7822 q2.tail.ptr().schedlink = 0
7823 if q.tail != 0 {
7824 q.tail.ptr().schedlink = q2.head
7825 } else {
7826 q.head = q2.head
7827 }
7828 q.tail = q2.tail
7829 q.size += q2.size
7830 }
7831
7832
7833
7834 func (q *gQueue) pop() *g {
7835 gp := q.head.ptr()
7836 if gp != nil {
7837 q.head = gp.schedlink
7838 if q.head == 0 {
7839 q.tail = 0
7840 }
7841 q.size--
7842 }
7843 return gp
7844 }
7845
7846
7847 func (q *gQueue) popList() gList {
7848 stack := gList{q.head, q.size}
7849 *q = gQueue{}
7850 return stack
7851 }
7852
7853
7854
// A gList is a singly-linked LIFO list of Gs through g.schedlink.
// A G can only be on one gQueue or gList at a time.
type gList struct {
	head guintptr // first G, or 0 if the list is empty
	size int32    // number of Gs in the list
}
7859
7860
7861 func (l *gList) empty() bool {
7862 return l.head == 0
7863 }
7864
7865
7866 func (l *gList) push(gp *g) {
7867 gp.schedlink = l.head
7868 l.head.set(gp)
7869 l.size++
7870 }
7871
7872
7873 func (l *gList) pushAll(q gQueue) {
7874 if !q.empty() {
7875 q.tail.ptr().schedlink = l.head
7876 l.head = q.head
7877 l.size += q.size
7878 }
7879 }
7880
7881
7882 func (l *gList) pop() *g {
7883 gp := l.head.ptr()
7884 if gp != nil {
7885 l.head = gp.schedlink
7886 l.size--
7887 }
7888 return gp
7889 }
7890
7891
7892 func setMaxThreads(in int) (out int) {
7893 lock(&sched.lock)
7894 out = int(sched.maxmcount)
7895 if in > 0x7fffffff {
7896 sched.maxmcount = 0x7fffffff
7897 } else {
7898 sched.maxmcount = int32(in)
7899 }
7900 checkmcount()
7901 unlock(&sched.lock)
7902 return
7903 }
7904
7905
7906
7907
7908
7909
7910
7911
7912
7913
7914
7915
7916
7917 func procPin() int {
7918 gp := getg()
7919 mp := gp.m
7920
7921 mp.locks++
7922 return int(mp.p.ptr().id)
7923 }
7924
7925
7926
7927
7928
7929
7930
7931
7932
7933
7934
7935
7936
7937 func procUnpin() {
7938 gp := getg()
7939 gp.m.locks--
7940 }
7941
7942
7943
// sync_runtime_procPin implements sync.runtime_procPin by forwarding
// to procPin. (NOTE(review): presumably exposed to package sync via a
// go:linkname directive not visible in this view — confirm upstream.)
func sync_runtime_procPin() int {
	return procPin()
}
7947
7948
7949
// sync_runtime_procUnpin implements sync.runtime_procUnpin by
// forwarding to procUnpin. (NOTE(review): presumably linked into
// package sync via go:linkname — directive not visible here.)
func sync_runtime_procUnpin() {
	procUnpin()
}
7953
7954
7955
// sync_atomic_runtime_procPin implements sync/atomic's runtime_procPin
// by forwarding to procPin. (NOTE(review): presumably linked into
// sync/atomic via go:linkname — directive not visible here.)
func sync_atomic_runtime_procPin() int {
	return procPin()
}
7959
7960
7961
// sync_atomic_runtime_procUnpin implements sync/atomic's
// runtime_procUnpin by forwarding to procUnpin. (NOTE(review):
// presumably linked via go:linkname — directive not visible here.)
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}
7965
7966
7967
7968
7969
// internal_sync_runtime_canSpin reports whether active spinning for a
// mutex is worthwhile after i prior spin attempts.
//
// Spinning is allowed only a few times (i < active_spin), and only on a
// multicore machine where GOMAXPROCS exceeds the number of idle plus
// spinning Ps by more than one (i.e. at least one other P is actually
// running code), and only while the local runqueue is empty — otherwise
// we should yield and let local work run instead of burning CPU.
func internal_sync_runtime_canSpin(i int) bool {
	if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		return false
	}
	return true
}
7984
7985
7986
// internal_sync_runtime_doSpin performs one bounded burst of active
// spinning: active_spin_cnt iterations of procyield.
func internal_sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}
7990
7991
7992
7993
7994
7995
7996
7997
7998
7999
8000
8001
8002
8003
8004
// sync_runtime_canSpin is a thin wrapper over
// internal_sync_runtime_canSpin. (NOTE(review): the name suggests a
// go:linkname export for package sync; directive not visible here.)
func sync_runtime_canSpin(i int) bool {
	return internal_sync_runtime_canSpin(i)
}
8008
8009
8010
8011
8012
8013
8014
8015
8016
8017
8018
8019
8020
// sync_runtime_doSpin is a thin wrapper over
// internal_sync_runtime_doSpin. (NOTE(review): the name suggests a
// go:linkname export for package sync; directive not visible here.)
func sync_runtime_doSpin() {
	internal_sync_runtime_doSpin()
}
8024
// stealOrder is the global randomOrder used to enumerate Ps in a
// pseudo-random order (see randomOrder below).
var stealOrder randomOrder
8026
8027
8028
8029
8030
// randomOrder/randomEnum are helper types for randomized enumeration
// over count indices without repetition. They rely on the fact that if
// inc is coprime with count, then the sequence (pos + inc) % count
// visits every value in [0, count) exactly once.
type randomOrder struct {
	count    uint32   // number of items to enumerate over
	coprimes []uint32 // all values in [1, count] coprime with count
}
8035
// randomEnum is a single in-progress enumeration produced by
// randomOrder.start.
type randomEnum struct {
	i     uint32 // number of positions yielded so far
	count uint32 // total number of positions to visit
	pos   uint32 // current position in [0, count)
	inc   uint32 // step size, coprime with count
}
8042
8043 func (ord *randomOrder) reset(count uint32) {
8044 ord.count = count
8045 ord.coprimes = ord.coprimes[:0]
8046 for i := uint32(1); i <= count; i++ {
8047 if gcd(i, count) == 1 {
8048 ord.coprimes = append(ord.coprimes, i)
8049 }
8050 }
8051 }
8052
8053 func (ord *randomOrder) start(i uint32) randomEnum {
8054 return randomEnum{
8055 count: ord.count,
8056 pos: i % ord.count,
8057 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
8058 }
8059 }
8060
// done reports whether the enumeration has yielded all count positions.
func (enum *randomEnum) done() bool {
	return enum.i == enum.count
}
8064
8065 func (enum *randomEnum) next() {
8066 enum.i++
8067 enum.pos = (enum.pos + enum.inc) % enum.count
8068 }
8069
// position returns the current position of the enumeration.
func (enum *randomEnum) position() uint32 {
	return enum.pos
}
8073
// gcd returns the greatest common divisor of a and b using the
// iterative Euclidean algorithm. gcd(x, 0) == gcd(0, x) == x.
func gcd(a, b uint32) uint32 {
	for b != 0 {
		r := a % b
		a = b
		b = r
	}
	return a
}
8080
8081
8082
// An initTask represents the set of package initialization functions
// for a single package, laid out in memory by the linker.
// The state/nfns header is followed immediately by nfns function
// pointers (see doInit1, which indexes past the 8-byte header).
type initTask struct {
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done (see doInit1)
	nfns  uint32 // number of init functions following this header

}
8088
8089
8090
// inittrace accumulates init-tracing state and allocation statistics,
// activated when debug.inittrace != 0 (see runtime.main and doInit1).
var inittrace tracestat
8092
// tracestat holds the statistics printed by init tracing.
type tracestat struct {
	active bool   // init tracing activation status
	id     uint64 // id of the goroutine running inits (set from goid in runtime.main)
	allocs uint64 // heap allocation count, reported as "allocs" by doInit1
	bytes  uint64 // heap allocated bytes, reported as "bytes" by doInit1
}
8099
8100 func doInit(ts []*initTask) {
8101 for _, t := range ts {
8102 doInit1(t)
8103 }
8104 }
8105
// doInit1 runs the init functions of a single initTask exactly once,
// guarding against recursive initialization, and prints timing and
// allocation statistics when init tracing is active.
func doInit1(t *initTask) {
	switch t.state {
	case 2: // fully initialized
		return
	case 1: // initialization in progress
		throw("recursive call during initialization - linker skew")
	default: // not initialized yet
		t.state = 1 // initialization in progress

		var (
			start  int64
			before tracestat
		)

		if inittrace.active {
			start = nanotime()
			// Snapshot stats before running the inits; the delta is
			// reported below.
			before = inittrace
		}

		if t.nfns == 0 {
			// Tasks with no functions are expected to be pruned earlier
			// (NOTE(review): presumably by the linker — confirm).
			throw("inittask with no functions")
		}

		// The function pointers start right after the 8-byte header
		// (state uint32 + nfns uint32).
		firstFunc := add(unsafe.Pointer(t), 8)
		for i := uint32(0); i < t.nfns; i++ {
			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
			// Reinterpret the stored pointer as a func() value and call it.
			f := *(*func())(unsafe.Pointer(&p))
			f()
		}

		if inittrace.active {
			end := nanotime()
			// Snapshot stats after running the inits.
			after := inittrace

			// Use the first init function's PC to resolve the package path.
			f := *(*func())(unsafe.Pointer(&firstFunc))
			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))

			var sbuf [24]byte
			print("init ", pkg, " @")
			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
			print("\n")
		}

		t.state = 2 // initialization done
	}
}
8158
View as plain text