Source file
src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/strconv"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
21 var modinfo string
// Goroutine scheduler.
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall without an associated P.
//
// Design doc at https://golang.org/s/go11sched.
//
// Worker thread parking/unparking is a balance between keeping enough running
// threads to use available hardware parallelism and parking excess threads to
// conserve CPU and power. Idle M's spin looking for work before parking, and
// wakep unparks a spinning M whenever new work may have appeared.
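// m0 is the bootstrap thread's m, and g0 is m0's scheduling-stack goroutine.
// mcache0 is the bootstrap mcache used before any P exists. They are
// initialized very early, before schedinit runs.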
117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
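// runtime_inittasks is the list of the runtime package's init tasks,
// populated by the linker.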
127 var runtime_inittasks []*initTask
128
129
130
131
132
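// main_init_done is closed once initialization of the main package has
// completed; cgocallbackg waits on it so cgo callbacks do not run user code
// before init is finished.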
133 var main_init_done chan bool
134
135
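// main_main is the program's main.main function, made available to the
// runtime via linkname.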
136 func main_main()
137
138
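// mainStarted indicates that the main M has started.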
139 var mainStarted bool
140
141
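// runtimeInitTime is the nanotime() at which the runtime started.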
142 var runtimeInitTime int64
143
144
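// initSigmask is the signal mask to use for newly created M's.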
145 var initSigmask sigset
146
147
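// main is the entry point of the main goroutine: it runs package init tasks,
// calls the user's main.main, and handles program exit.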
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
154
155
156
157
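// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.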
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
164
165
166
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
178
179
180
181
182
183
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247
248
249
250
251
252
253
254
255 last := lastmoduledatap
256 for m := &firstmoduledata; true; m = m.next {
257 doInit(m.inittasks)
258 if m == last {
259 break
260 }
261 }
262
263
264
265 inittrace.active = false
266
267 close(main_init_done)
268
269 needUnlock = false
270 unlockOSThread()
271
272 if isarchive || islibrary {
273
274
275 if GOARCH == "wasm" {
276
277
278
279
280
281
282
283 pause(sys.GetCallerSP() - 16)
284 panic("unreachable")
285 }
286 return
287 }
288 fn := main_main
289 fn()
290
291
292
293
294
295
296
297
298 exitHooksRun := false
299 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
300 runExitHooks(0)
301 exitHooksRun = true
302 lsandoleakcheck()
303 }
304
305
306
307
308
309 if runningPanicDefers.Load() != 0 {
310
311 for c := 0; c < 1000; c++ {
312 if runningPanicDefers.Load() == 0 {
313 break
314 }
315 Gosched()
316 }
317 }
318 if panicking.Load() != 0 {
319 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
320 }
321 if !exitHooksRun {
322 runExitHooks(0)
323 }
324 if raceenabled {
325 racefini()
326 }
327
328 exit(0)
329 for {
330 var x *int32
331 *x = 0
332 }
333 }
334
335
336
337
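// os_beforeExit is called from os.Exit via linkname. It runs exit hooks and,
// for a zero exit code, finishes race-detector and leak-check work.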
338 func os_beforeExit(exitCode int) {
339 runExitHooks(exitCode)
340 if exitCode == 0 && raceenabled {
341 racefini()
342 }
343
344
345 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
346 lsandoleakcheck()
347 }
348 }
349
350 func init() {
351 exithook.Gosched = Gosched
352 exithook.Goid = func() uint64 { return getg().goid }
353 exithook.Throw = throw
354 }
355
356 func runExitHooks(code int) {
357 exithook.Run(code)
358 }
359
360
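// start forcegc helper goroutine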
361 func init() {
362 go forcegchelper()
363 }
364
365 func forcegchelper() {
366 forcegc.g = getg()
367 lockInit(&forcegc.lock, lockRankForcegc)
368 for {
369 lock(&forcegc.lock)
370 if forcegc.idle.Load() {
371 throw("forcegc: phase error")
372 }
373 forcegc.idle.Store(true)
374 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
375
376 if debug.gctrace > 0 {
377 println("GC forced")
378 }
379
380 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
381 }
382 }
383
384
385
386
387
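// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.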
388 func Gosched() {
389 checkTimeouts()
390 mcall(gosched_m)
391 }
392
393
394
395
396
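// goschedguarded yields the processor like Gosched, but also checks for
// forbidden states and opts out of the yield in those cases.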
397 func goschedguarded() {
398 mcall(goschedguarded_m)
399 }
400
401
402
403
404
405
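// goschedIfBusy yields the processor like Gosched, but skips the yield when
// there are idle P's and no preemption has been requested, since yielding
// would accomplish nothing in that case.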
406 func goschedIfBusy() {
407 gp := getg()
408
409
410 if !gp.preempt && sched.npidle.Load() > 0 {
411 return
412 }
413 mcall(gosched_m)
414 }
415
// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between the call
// to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf runs, unless
// there is external synchronization preventing that.
//
// reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive.
444 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
445 if reason != waitReasonSleep {
446 checkTimeouts()
447 }
448 mp := acquirem()
449 gp := mp.curg
450 status := readgstatus(gp)
451 if status != _Grunning && status != _Gscanrunning {
452 throw("gopark: bad g status")
453 }
454 mp.waitlock = lock
455 mp.waitunlockf = unlockf
456 gp.waitreason = reason
457 mp.waitTraceBlockReason = traceReason
458 mp.waitTraceSkip = traceskip
459 releasem(mp)
460
461 mcall(park_m)
462 }
463
464
465
466 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
467 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
468 }
469
470
471
472
473
474
475
476
477
478
479
480 func goready(gp *g, traceskip int) {
481 systemstack(func() {
482 ready(gp, traceskip, true)
483 })
484 }
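// A rough sketch of how gopark and goready pair up for a hypothetical waiter
// (an illustration only; real users are channels, semaphores, netpoll,
// timers, and similar):
//
//	// waiter, running as goroutine gp:
//	gopark(func(gp *g, _ unsafe.Pointer) bool {
//		// publish gp somewhere the waker can find it, then commit to sleeping;
//		// returning false would cancel the park and keep gp running.
//		return true
//	}, nil, waitReasonZero, traceBlockGeneric, 1)
//
//	// waker, on another goroutine, once the awaited event has happened:
//	goready(gp, 1)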
485
486
487 func acquireSudog() *sudog {
// Delicate dance: the semaphore implementation calls
// acquireSudog, acquireSudog calls new(sudog),
// new calls malloc, malloc can call the garbage collector,
// and the garbage collector calls the semaphore implementation
// in stopTheWorld.
// Break the cycle by doing acquirem/releasem around new(sudog).
// The acquirem/releasem increments m.locks during new(sudog),
// which keeps the garbage collector from being invoked.
496 mp := acquirem()
497 pp := mp.p.ptr()
498 if len(pp.sudogcache) == 0 {
499 lock(&sched.sudoglock)
500
501 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
502 s := sched.sudogcache
503 sched.sudogcache = s.next
504 s.next = nil
505 pp.sudogcache = append(pp.sudogcache, s)
506 }
507 unlock(&sched.sudoglock)
508
509 if len(pp.sudogcache) == 0 {
510 pp.sudogcache = append(pp.sudogcache, new(sudog))
511 }
512 }
513 n := len(pp.sudogcache)
514 s := pp.sudogcache[n-1]
515 pp.sudogcache[n-1] = nil
516 pp.sudogcache = pp.sudogcache[:n-1]
517 if s.elem.get() != nil {
518 throw("acquireSudog: found s.elem != nil in cache")
519 }
520 releasem(mp)
521 return s
522 }
523
524
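// releaseSudog returns s to the per-P sudog cache, spilling half of the local
// cache to the central sched.sudogcache list when it is full.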
525 func releaseSudog(s *sudog) {
526 if s.elem.get() != nil {
527 throw("runtime: sudog with non-nil elem")
528 }
529 if s.isSelect {
530 throw("runtime: sudog with non-false isSelect")
531 }
532 if s.next != nil {
533 throw("runtime: sudog with non-nil next")
534 }
535 if s.prev != nil {
536 throw("runtime: sudog with non-nil prev")
537 }
538 if s.waitlink != nil {
539 throw("runtime: sudog with non-nil waitlink")
540 }
541 if s.c.get() != nil {
542 throw("runtime: sudog with non-nil c")
543 }
544 gp := getg()
545 if gp.param != nil {
546 throw("runtime: releaseSudog with non-nil gp.param")
547 }
548 mp := acquirem()
549 pp := mp.p.ptr()
550 if len(pp.sudogcache) == cap(pp.sudogcache) {
551
552 var first, last *sudog
553 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
554 n := len(pp.sudogcache)
555 p := pp.sudogcache[n-1]
556 pp.sudogcache[n-1] = nil
557 pp.sudogcache = pp.sudogcache[:n-1]
558 if first == nil {
559 first = p
560 } else {
561 last.next = p
562 }
563 last = p
564 }
565 lock(&sched.sudoglock)
566 last.next = sched.sudogcache
567 sched.sudogcache = first
568 unlock(&sched.sudoglock)
569 }
570 pp.sudogcache = append(pp.sudogcache, s)
571 releasem(mp)
572 }
573
574
575 func badmcall(fn func(*g)) {
576 throw("runtime: mcall called on m->g0 stack")
577 }
578
579 func badmcall2(fn func(*g)) {
580 throw("runtime: mcall function returned")
581 }
582
583 func badreflectcall() {
584 panic(plainError("arg size to reflect.call more than 1GB"))
585 }
586
587
588
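// badmorestackg0 is called from assembly when a stack split is attempted on
// the g0 stack, which must never grow; it switches to the crash stack to
// print a traceback and then throws.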
589 func badmorestackg0() {
590 if !crashStackImplemented {
591 writeErrStr("fatal: morestack on g0\n")
592 return
593 }
594
595 g := getg()
596 switchToCrashStack(func() {
597 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
598 g.m.traceback = 2
599 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
600 print("\n")
601
602 throw("morestack on g0")
603 })
604 }
605
606
607
608 func badmorestackgsignal() {
609 writeErrStr("fatal: morestack on gsignal\n")
610 }
611
612
613 func badctxt() {
614 throw("ctxt != 0")
615 }
616
617
618
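// gcrash is a fake g that can be used when crashing due to bad stack
// conditions.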
619 var gcrash g
620
621 var crashingG atomic.Pointer[g]
622
// switchToCrashStack switches to the crash stack (gcrash) and calls fn, with
// special handling of concurrent and recursive crashes: only the first
// crashing goroutine gets the crash stack; later ones spin or abort.
631 func switchToCrashStack(fn func()) {
632 me := getg()
633 if crashingG.CompareAndSwapNoWB(nil, me) {
634 switchToCrashStack0(fn)
635 abort()
636 }
637 if crashingG.Load() == me {
638
639 writeErrStr("fatal: recursive switchToCrashStack\n")
640 abort()
641 }
642
643 usleep_no_g(100)
644 writeErrStr("fatal: concurrent switchToCrashStack\n")
645 abort()
646 }
647
648
649
650
651 const crashStackImplemented = GOOS != "windows"
652
653
654 func switchToCrashStack0(fn func())
655
656 func lockedOSThread() bool {
657 gp := getg()
658 return gp.lockedm != 0 && gp.m.lockedg != 0
659 }
660
661 var (
662
663
664
665
666
667
668 allglock mutex
669 allgs []*g
670
671
672
673
674
675
676
677
678
679
680
681
682
683 allglen uintptr
684 allgptr **g
685 )
686
687 func allgadd(gp *g) {
688 if readgstatus(gp) == _Gidle {
689 throw("allgadd: bad status Gidle")
690 }
691
692 lock(&allglock)
693 allgs = append(allgs, gp)
694 if &allgs[0] != allgptr {
695 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
696 }
697 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
698 unlock(&allglock)
699 }
700
701
702
703
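// allGsSnapshot returns a snapshot of the slice of all Gs.
//
// The world must be stopped or allglock must be held.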
704 func allGsSnapshot() []*g {
705 assertWorldStoppedOrLockHeld(&allglock)
706
707
708
709
710
711
712 return allgs[:len(allgs):len(allgs)]
713 }
714
715
716 func atomicAllG() (**g, uintptr) {
717 length := atomic.Loaduintptr(&allglen)
718 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
719 return ptr, length
720 }
721
722
723 func atomicAllGIndex(ptr **g, i uintptr) *g {
724 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
725 }
726
727
728
729
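// forEachG calls fn on every G from allgs.
//
// forEachG takes a lock to exclude concurrent addition of new Gs.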
730 func forEachG(fn func(gp *g)) {
731 lock(&allglock)
732 for _, gp := range allgs {
733 fn(gp)
734 }
735 unlock(&allglock)
736 }
737
738
739
740
741
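// forEachGRace calls fn on every G from allgs.
//
// forEachGRace avoids locking, but does not exclude addition of new Gs during
// execution, which may be missed.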
742 func forEachGRace(fn func(gp *g)) {
743 ptr, length := atomicAllG()
744 for i := uintptr(0); i < length; i++ {
745 gp := atomicAllGIndex(ptr, i)
746 fn(gp)
747 }
748 return
749 }
750
751 const (
752
753
754 _GoidCacheBatch = 16
755 )
756
757
758
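// cpuinit sets up CPU feature flags and calls internal/cpu.Initialize.
// env should be the complete value of the GODEBUG environment variable.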
759 func cpuinit(env string) {
760 cpu.Initialize(env)
761
762
763
764 switch GOARCH {
765 case "386", "amd64":
766 x86HasAVX = cpu.X86.HasAVX
767 x86HasFMA = cpu.X86.HasFMA
768 x86HasPOPCNT = cpu.X86.HasPOPCNT
769 x86HasSSE41 = cpu.X86.HasSSE41
770
771 case "arm":
772 armHasVFPv4 = cpu.ARM.HasVFPv4
773
774 case "arm64":
775 arm64HasATOMICS = cpu.ARM64.HasATOMICS
776
777 case "loong64":
778 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
779 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
780 loong64HasLSX = cpu.Loong64.HasLSX
781
782 case "riscv64":
783 riscv64HasZbb = cpu.RISCV64.HasZbb
784 }
785 }
786
787
788
789
790
791
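// getGodebugEarly extracts the GODEBUG value from the environment block very
// early in startup, before the usual environment handling is available. The
// second result reports whether the value could be determined on this OS.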
792 func getGodebugEarly() (string, bool) {
793 const prefix = "GODEBUG="
794 var env string
795 switch GOOS {
796 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
797
798
799
800 n := int32(0)
801 for argv_index(argv, argc+1+n) != nil {
802 n++
803 }
804
805 for i := int32(0); i < n; i++ {
806 p := argv_index(argv, argc+1+i)
807 s := unsafe.String(p, findnull(p))
808
809 if stringslite.HasPrefix(s, prefix) {
810 env = gostringnocopy(p)[len(prefix):]
811 break
812 }
813 }
814 break
815
816 default:
817 return "", false
818 }
819 return env, true
820 }
821
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
830 func schedinit() {
831 lockInit(&sched.lock, lockRankSched)
832 lockInit(&sched.sysmonlock, lockRankSysmon)
833 lockInit(&sched.deferlock, lockRankDefer)
834 lockInit(&sched.sudoglock, lockRankSudog)
835 lockInit(&deadlock, lockRankDeadlock)
836 lockInit(&paniclk, lockRankPanic)
837 lockInit(&allglock, lockRankAllg)
838 lockInit(&allpLock, lockRankAllp)
839 lockInit(&reflectOffs.lock, lockRankReflectOffs)
840 lockInit(&finlock, lockRankFin)
841 lockInit(&cpuprof.lock, lockRankCpuprof)
842 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
843 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
844 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
845 traceLockInit()
846
847
848
849 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
850
851 lockVerifyMSize()
852
853 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
854
855
856
857 gp := getg()
858 if raceenabled {
859 gp.racectx, raceprocctx0 = raceinit()
860 }
861
862 sched.maxmcount = 10000
863 crashFD.Store(^uintptr(0))
864
865
866 worldStopped()
867
868 godebug, parsedGodebug := getGodebugEarly()
869 if parsedGodebug {
870 parseRuntimeDebugVars(godebug)
871 }
872 ticks.init()
873 moduledataverify()
874 stackinit()
875 randinit()
876 mallocinit()
877 cpuinit(godebug)
878 alginit()
879 mcommoninit(gp.m, -1)
880 modulesinit()
881 typelinksinit()
882 itabsinit()
883 stkobjinit()
884
885 sigsave(&gp.m.sigmask)
886 initSigmask = gp.m.sigmask
887
888 goargs()
889 goenvs()
890 secure()
891 checkfds()
892 if !parsedGodebug {
893
894
895 parseRuntimeDebugVars(gogetenv("GODEBUG"))
896 }
897 finishDebugVarsSetup()
898 gcinit()
899
900
901
902 gcrash.stack = stackalloc(16384)
903 gcrash.stackguard0 = gcrash.stack.lo + 1000
904 gcrash.stackguard1 = gcrash.stack.lo + 1000
905
906
907
908
909
910 if disableMemoryProfiling {
911 MemProfileRate = 0
912 }
913
914
915 mProfStackInit(gp.m)
916 defaultGOMAXPROCSInit()
917
918 lock(&sched.lock)
919 sched.lastpoll.Store(nanotime())
920 var procs int32
921 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
922 procs = int32(n)
923 sched.customGOMAXPROCS = true
924 } else {
925
926
927
928
929
930
931
932
933 procs = defaultGOMAXPROCS(numCPUStartup)
934 }
935 if procresize(procs) != nil {
936 throw("unknown runnable goroutine during bootstrap")
937 }
938 unlock(&sched.lock)
939
940
941 worldStarted()
942
943 if buildVersion == "" {
944
945
946 buildVersion = "unknown"
947 }
948 if len(modinfo) == 1 {
949
950
951 modinfo = ""
952 }
953 }
954
955 func dumpgstatus(gp *g) {
956 thisg := getg()
957 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
958 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
959 }
960
961
962 func checkmcount() {
963 assertLockHeld(&sched.lock)
964
965
966
967
968
969
970
971
972
973 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
974 if count > sched.maxmcount {
975 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
976 throw("thread exhaustion")
977 }
978 }
979
980
981
982
983
984 func mReserveID() int64 {
985 assertLockHeld(&sched.lock)
986
987 if sched.mnext+1 < sched.mnext {
988 throw("runtime: thread ID overflow")
989 }
990 id := sched.mnext
991 sched.mnext++
992 checkmcount()
993 return id
994 }
995
996
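// mcommoninit performs initialization common to all new m's. A pre-allocated
// ID may be passed as id, or -1 to have one reserved.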
997 func mcommoninit(mp *m, id int64) {
998 gp := getg()
999
1000
1001 if gp != gp.m.g0 {
1002 callers(1, mp.createstack[:])
1003 }
1004
1005 lock(&sched.lock)
1006
1007 if id >= 0 {
1008 mp.id = id
1009 } else {
1010 mp.id = mReserveID()
1011 }
1012
1013 mp.self = newMWeakPointer(mp)
1014
1015 mrandinit(mp)
1016
1017 mpreinit(mp)
1018 if mp.gsignal != nil {
1019 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1020 }
1021
1022
1023
1024 mp.alllink = allm
1025
1026
1027
1028 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1029 unlock(&sched.lock)
1030
1031
1032 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1033 mp.cgoCallers = new(cgoCallers)
1034 }
1035 mProfStackInit(mp)
1036 }
1037
1038
1039
1040
1041
1042 func mProfStackInit(mp *m) {
1043 if debug.profstackdepth == 0 {
1044
1045
1046 return
1047 }
1048 mp.profStack = makeProfStackFP()
1049 mp.mLockProfile.stack = makeProfStackFP()
1050 }
1051
1052
1053
1054
1055 func makeProfStackFP() []uintptr {
1056
1057
1058
1059
1060
1061
1062 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1063 }
1064
1065
1066
1067 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1068
1069
1070 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1071
1072 func (mp *m) becomeSpinning() {
1073 mp.spinning = true
1074 sched.nmspinning.Add(1)
1075 sched.needspinning.Store(0)
1076 }
1077
1078
1079
1080
1081
1082
1083
1084
1085 func (mp *m) snapshotAllp() []*p {
1086 mp.allpSnapshot = allp
1087 return mp.allpSnapshot
1088 }
1089
1090
1091
1092
1093
1094
1095
1096 func (mp *m) clearAllpSnapshot() {
1097 mp.allpSnapshot = nil
1098 }
1099
1100 func (mp *m) hasCgoOnStack() bool {
1101 return mp.ncgo > 0 || mp.isextra
1102 }
1103
1104 const (
1105
1106
1107 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1108
1109
1110
1111 osHasLowResClockInt = goos.IsWindows
1112
1113
1114
1115 osHasLowResClock = osHasLowResClockInt > 0
1116 )
1117
1118
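// ready marks gp ready to run and puts it on the current P's run queue.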
1119 func ready(gp *g, traceskip int, next bool) {
1120 status := readgstatus(gp)
1121
1122
1123 mp := acquirem()
1124 if status&^_Gscan != _Gwaiting {
1125 dumpgstatus(gp)
1126 throw("bad g->status in ready")
1127 }
1128
1129
1130 trace := traceAcquire()
1131 casgstatus(gp, _Gwaiting, _Grunnable)
1132 if trace.ok() {
1133 trace.GoUnpark(gp, traceskip)
1134 traceRelease(trace)
1135 }
1136 runqput(mp.p.ptr(), gp, next)
1137 wakep()
1138 releasem(mp)
1139 }
1140
1141
1142
1143 const freezeStopWait = 0x7fffffff
1144
1145
1146
1147 var freezing atomic.Bool
1148
1149
1150
1151
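// freezetheworld is similar to stopTheWorld but best-effort and may be called
// several times. There is no reverse operation; it is used only while
// crashing. This function must not lock any mutexes.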
1152 func freezetheworld() {
1153 freezing.Store(true)
1154 if debug.dontfreezetheworld > 0 {
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179 usleep(1000)
1180 return
1181 }
1182
1183
1184
1185
1186 for i := 0; i < 5; i++ {
1187
1188 sched.stopwait = freezeStopWait
1189 sched.gcwaiting.Store(true)
1190
1191 if !preemptall() {
1192 break
1193 }
1194 usleep(1000)
1195 }
1196
1197 usleep(1000)
1198 preemptall()
1199 usleep(1000)
1200 }
1201
1202
1203
1204
1205
1206 func readgstatus(gp *g) uint32 {
1207 return gp.atomicstatus.Load()
1208 }
1209
1210
1211
1212
1213
1214 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1215 success := false
1216
1217
1218 switch oldval {
1219 default:
1220 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1221 dumpgstatus(gp)
1222 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1223 case _Gscanrunnable,
1224 _Gscanwaiting,
1225 _Gscanrunning,
1226 _Gscansyscall,
1227 _Gscanleaked,
1228 _Gscanpreempted,
1229 _Gscandeadextra:
1230 if newval == oldval&^_Gscan {
1231 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1232 }
1233 }
1234 if !success {
1235 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1236 dumpgstatus(gp)
1237 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1238 }
1239 releaseLockRankAndM(lockRankGscan)
1240 }
1241
1242
1243
1244 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1245 switch oldval {
1246 case _Grunnable,
1247 _Grunning,
1248 _Gwaiting,
1249 _Gleaked,
1250 _Gsyscall,
1251 _Gdeadextra:
1252 if newval == oldval|_Gscan {
1253 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1254 if r {
1255 acquireLockRankAndM(lockRankGscan)
1256 }
1257 return r
1258
1259 }
1260 }
1261 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1262 throw("bad oldval passed to castogscanstatus")
1263 return false
1264 }
1265
1266
1267
1268 var casgstatusAlwaysTrack = false
1269
1270
1271
1272
1273
1274
1275
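// casgstatus atomically changes gp's status from oldval to newval. It throws
// if asked to move to or from a Gscan status; use castogscanstatus and
// casfrom_Gscanstatus for that. casgstatus loops while gp's status is in a
// Gscan state until the change can be made.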
1276 func casgstatus(gp *g, oldval, newval uint32) {
1277 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1278 systemstack(func() {
1279
1280
1281 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1282 throw("casgstatus: bad incoming values")
1283 })
1284 }
1285
1286 lockWithRankMayAcquire(nil, lockRankGscan)
1287
1288
1289 const yieldDelay = 5 * 1000
1290 var nextYield int64
1291
1292
1293
1294 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1295 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1296 systemstack(func() {
1297
1298
1299 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1300 })
1301 }
1302 if i == 0 {
1303 nextYield = nanotime() + yieldDelay
1304 }
1305 if nanotime() < nextYield {
1306 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1307 procyield(1)
1308 }
1309 } else {
1310 osyield()
1311 nextYield = nanotime() + yieldDelay/2
1312 }
1313 }
1314
1315 if gp.bubble != nil {
1316 systemstack(func() {
1317 gp.bubble.changegstatus(gp, oldval, newval)
1318 })
1319 }
1320
1321 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1322
1323
1324 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1325 gp.tracking = true
1326 }
1327 gp.trackingSeq++
1328 }
1329 if !gp.tracking {
1330 return
1331 }
1332
1333
1334
1335
1336
1337
1338 switch oldval {
1339 case _Grunnable:
1340
1341
1342
1343 now := nanotime()
1344 gp.runnableTime += now - gp.trackingStamp
1345 gp.trackingStamp = 0
1346 case _Gwaiting:
1347 if !gp.waitreason.isMutexWait() {
1348
1349 break
1350 }
1351
1352
1353
1354
1355
1356 now := nanotime()
1357 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1358 gp.trackingStamp = 0
1359 }
1360 switch newval {
1361 case _Gwaiting:
1362 if !gp.waitreason.isMutexWait() {
1363
1364 break
1365 }
1366
1367 now := nanotime()
1368 gp.trackingStamp = now
1369 case _Grunnable:
1370
1371
1372 now := nanotime()
1373 gp.trackingStamp = now
1374 case _Grunning:
1375
1376
1377
1378 gp.tracking = false
1379 sched.timeToRun.record(gp.runnableTime)
1380 gp.runnableTime = 0
1381 }
1382 }
1383
1384
1385
1386
1387 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1388
1389 gp.waitreason = reason
1390 casgstatus(gp, old, _Gwaiting)
1391 }
1392
1393
1394
1395
1396
1397
1398
1399
1400 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1401 if !reason.isWaitingForSuspendG() {
1402 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1403 }
1404 casGToWaiting(gp, old, reason)
1405 }
1406
1407
1408
1409
1410
1411 func casGToPreemptScan(gp *g, old, new uint32) {
1412 if old != _Grunning || new != _Gscan|_Gpreempted {
1413 throw("bad g transition")
1414 }
1415 acquireLockRankAndM(lockRankGscan)
1416 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1417 }
1418
1419
1420
1421
1422
1423
1424 }
1425
1426
1427
1428
1429 func casGFromPreempted(gp *g, old, new uint32) bool {
1430 if old != _Gpreempted || new != _Gwaiting {
1431 throw("bad g transition")
1432 }
1433 gp.waitreason = waitReasonPreempted
1434 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1435 return false
1436 }
1437 if bubble := gp.bubble; bubble != nil {
1438 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1439 }
1440 return true
1441 }
1442
1443
1444 type stwReason uint8
1445
1446
1447
1448
1449 const (
1450 stwUnknown stwReason = iota
1451 stwGCMarkTerm
1452 stwGCSweepTerm
1453 stwWriteHeapDump
1454 stwGoroutineProfile
1455 stwGoroutineProfileCleanup
1456 stwAllGoroutinesStack
1457 stwReadMemStats
1458 stwAllThreadsSyscall
1459 stwGOMAXPROCS
1460 stwStartTrace
1461 stwStopTrace
1462 stwForTestCountPagesInUse
1463 stwForTestReadMetricsSlow
1464 stwForTestReadMemStatsSlow
1465 stwForTestPageCachePagesLeaked
1466 stwForTestResetDebugLog
1467 )
1468
1469 func (r stwReason) String() string {
1470 return stwReasonStrings[r]
1471 }
1472
1473 func (r stwReason) isGC() bool {
1474 return r == stwGCMarkTerm || r == stwGCSweepTerm
1475 }
1476
1477
1478
1479
1480 var stwReasonStrings = [...]string{
1481 stwUnknown: "unknown",
1482 stwGCMarkTerm: "GC mark termination",
1483 stwGCSweepTerm: "GC sweep termination",
1484 stwWriteHeapDump: "write heap dump",
1485 stwGoroutineProfile: "goroutine profile",
1486 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1487 stwAllGoroutinesStack: "all goroutines stack trace",
1488 stwReadMemStats: "read mem stats",
1489 stwAllThreadsSyscall: "AllThreadsSyscall",
1490 stwGOMAXPROCS: "GOMAXPROCS",
1491 stwStartTrace: "start trace",
1492 stwStopTrace: "stop trace",
1493 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1494 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1495 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1496 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1497 stwForTestResetDebugLog: "ResetDebugLog (test)",
1498 }
1499
1500
1501
1502 type worldStop struct {
1503 reason stwReason
1504 startedStopping int64
1505 finishedStopping int64
1506 stoppingCPUTime int64
1507 }
1508
1509
1510
1511
1512 var stopTheWorldContext worldStop
1513
// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points, and records reason as the reason for the
// stop. On return, only the current goroutine's P is running. stopTheWorld
// must not be called from a system stack and the caller must not hold
// worldsema. The caller must call startTheWorld when other P's should resume
// execution.
//
// stopTheWorld is safe for multiple goroutines to call at the same time. Each
// will execute its own stop, and the stops will be serialized.
//
// This is also used by routines that do stack dumps. If the system is in
// panic or being exited, this may not reliably stop all goroutines.
1531 func stopTheWorld(reason stwReason) worldStop {
1532 semacquire(&worldsema)
1533 gp := getg()
1534 gp.m.preemptoff = reason.String()
1535 systemstack(func() {
1536 stopTheWorldContext = stopTheWorldWithSema(reason)
1537 })
1538 return stopTheWorldContext
1539 }
1540
1541
1542
1543
1544 func startTheWorld(w worldStop) {
1545 systemstack(func() { startTheWorldWithSema(0, w) })
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562 mp := acquirem()
1563 mp.preemptoff = ""
1564 semrelease1(&worldsema, true, 0)
1565 releasem(mp)
1566 }
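// A typical stop/start pairing looks roughly like this (an illustrative
// sketch; the reason constant depends on the caller):
//
//	stw := stopTheWorld(stwReadMemStats)
//	// ... inspect or mutate global state while nothing else runs ...
//	startTheWorld(stw)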
1567
1568
1569
1570
1571 func stopTheWorldGC(reason stwReason) worldStop {
1572 semacquire(&gcsema)
1573 return stopTheWorld(reason)
1574 }
1575
1576
1577
1578
1579 func startTheWorldGC(w worldStop) {
1580 startTheWorld(w)
1581 semrelease(&gcsema)
1582 }
1583
1584
1585 var worldsema uint32 = 1
1586
1587
1588
1589
1590
1591
1592
1593 var gcsema uint32 = 1
// stopTheWorldWithSema is the core implementation of stopTheWorld. The caller
// is responsible for acquiring worldsema and disabling preemption first, and
// then should call stopTheWorldWithSema on the system stack. When finished,
// the caller must either call startTheWorld or undo those operations
// separately (re-enable preemption, run startTheWorldWithSema on the system
// stack, and release worldsema).
//
// It is allowed to acquire worldsema once and then execute multiple
// stopTheWorldWithSema/startTheWorldWithSema pairs; other P's are able to
// execute between successive calls. Holding worldsema causes any other
// goroutines invoking stopTheWorld to block.
//
// Returns the STW context, which must be passed to startTheWorldWithSema.
1627 func stopTheWorldWithSema(reason stwReason) worldStop {
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1641
1642 trace := traceAcquire()
1643 if trace.ok() {
1644 trace.STWStart(reason)
1645 traceRelease(trace)
1646 }
1647 gp := getg()
1648
1649
1650
1651 if gp.m.locks > 0 {
1652 throw("stopTheWorld: holding locks")
1653 }
1654
1655 lock(&sched.lock)
1656 start := nanotime()
1657 sched.stopwait = gomaxprocs
1658 sched.gcwaiting.Store(true)
1659 preemptall()
1660
1661
1662 gp.m.p.ptr().status = _Pgcstop
1663 gp.m.p.ptr().gcStopTime = start
1664 sched.stopwait--
1665
1666
1667 for _, pp := range allp {
1668 if thread, ok := setBlockOnExitSyscall(pp); ok {
1669 thread.gcstopP()
1670 thread.resume()
1671 }
1672 }
1673
1674
1675 now := nanotime()
1676 for {
1677 pp, _ := pidleget(now)
1678 if pp == nil {
1679 break
1680 }
1681 pp.status = _Pgcstop
1682 pp.gcStopTime = nanotime()
1683 sched.stopwait--
1684 }
1685 wait := sched.stopwait > 0
1686 unlock(&sched.lock)
1687
1688
1689 if wait {
1690 for {
1691
1692 if notetsleep(&sched.stopnote, 100*1000) {
1693 noteclear(&sched.stopnote)
1694 break
1695 }
1696 preemptall()
1697 }
1698 }
1699
1700 finish := nanotime()
1701 startTime := finish - start
1702 if reason.isGC() {
1703 sched.stwStoppingTimeGC.record(startTime)
1704 } else {
1705 sched.stwStoppingTimeOther.record(startTime)
1706 }
1707
1708
1709
1710
1711
1712 stoppingCPUTime := int64(0)
1713 bad := ""
1714 if sched.stopwait != 0 {
1715 bad = "stopTheWorld: not stopped (stopwait != 0)"
1716 } else {
1717 for _, pp := range allp {
1718 if pp.status != _Pgcstop {
1719 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1720 }
1721 if pp.gcStopTime == 0 && bad == "" {
1722 bad = "stopTheWorld: broken CPU time accounting"
1723 }
1724 stoppingCPUTime += finish - pp.gcStopTime
1725 pp.gcStopTime = 0
1726 }
1727 }
1728 if freezing.Load() {
1729
1730
1731
1732
1733 lock(&deadlock)
1734 lock(&deadlock)
1735 }
1736 if bad != "" {
1737 throw(bad)
1738 }
1739
1740 worldStopped()
1741
1742
1743 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1744
1745 return worldStop{
1746 reason: reason,
1747 startedStopping: start,
1748 finishedStopping: finish,
1749 stoppingCPUTime: stoppingCPUTime,
1750 }
1751 }
1752
1753
1754
1755
1756
1757
1758
1759 func startTheWorldWithSema(now int64, w worldStop) int64 {
1760 assertWorldStopped()
1761
1762 mp := acquirem()
1763 if netpollinited() {
1764 list, delta := netpoll(0)
1765 injectglist(&list)
1766 netpollAdjustWaiters(delta)
1767 }
1768 lock(&sched.lock)
1769
1770 procs := gomaxprocs
1771 if newprocs != 0 {
1772 procs = newprocs
1773 newprocs = 0
1774 }
1775 p1 := procresize(procs)
1776 sched.gcwaiting.Store(false)
1777 if sched.sysmonwait.Load() {
1778 sched.sysmonwait.Store(false)
1779 notewakeup(&sched.sysmonnote)
1780 }
1781 unlock(&sched.lock)
1782
1783 worldStarted()
1784
1785 for p1 != nil {
1786 p := p1
1787 p1 = p1.link.ptr()
1788 if p.m != 0 {
1789 mp := p.m.ptr()
1790 p.m = 0
1791 if mp.nextp != 0 {
1792 throw("startTheWorld: inconsistent mp->nextp")
1793 }
1794 mp.nextp.set(p)
1795 notewakeup(&mp.park)
1796 } else {
1797
1798 newm(nil, p, -1)
1799 }
1800 }
1801
1802
1803 if now == 0 {
1804 now = nanotime()
1805 }
1806 totalTime := now - w.startedStopping
1807 if w.reason.isGC() {
1808 sched.stwTotalTimeGC.record(totalTime)
1809 } else {
1810 sched.stwTotalTimeOther.record(totalTime)
1811 }
1812 trace := traceAcquire()
1813 if trace.ok() {
1814 trace.STWDone()
1815 traceRelease(trace)
1816 }
1817
1818
1819
1820
1821 wakep()
1822
1823 releasem(mp)
1824
1825 return now
1826 }
1827
1828
1829
1830 func usesLibcall() bool {
1831 switch GOOS {
1832 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1833 return true
1834 }
1835 return false
1836 }
1837
1838
1839
1840 func mStackIsSystemAllocated() bool {
1841 switch GOOS {
1842 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1843 return true
1844 }
1845 return false
1846 }
1847
1848
1849
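// mstart is the entry-point for new M's. It is written in assembly, uses
// ABI0, is marked TOPFRAME, and calls mstart0.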
1850 func mstart()
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
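// mstart0 is the Go entry-point for new M's. It must not split the stack
// because we may not even have stack bounds set up yet.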
1861 func mstart0() {
1862 gp := getg()
1863
1864 osStack := gp.stack.lo == 0
1865 if osStack {
1866
1867
1868
1869
1870
1871
1872
1873
1874 size := gp.stack.hi
1875 if size == 0 {
1876 size = 16384 * sys.StackGuardMultiplier
1877 }
1878 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1879 gp.stack.lo = gp.stack.hi - size + 1024
1880 }
1881
1882
1883 gp.stackguard0 = gp.stack.lo + stackGuard
1884
1885
1886 gp.stackguard1 = gp.stackguard0
1887 mstart1()
1888
1889
1890 if mStackIsSystemAllocated() {
1891
1892
1893
1894 osStack = true
1895 }
1896 mexit(osStack)
1897 }
1898
1899
1900
1901
1902
1903 func mstart1() {
1904 gp := getg()
1905
1906 if gp != gp.m.g0 {
1907 throw("bad runtime·mstart")
1908 }
1909
1910
1911
1912
1913
1914
1915
1916 gp.sched.g = guintptr(unsafe.Pointer(gp))
1917 gp.sched.pc = sys.GetCallerPC()
1918 gp.sched.sp = sys.GetCallerSP()
1919
1920 asminit()
1921 minit()
1922
1923
1924
1925 if gp.m == &m0 {
1926 mstartm0()
1927 }
1928
1929 if debug.dataindependenttiming == 1 {
1930 sys.EnableDIT()
1931 }
1932
1933 if fn := gp.m.mstartfn; fn != nil {
1934 fn()
1935 }
1936
1937 if gp.m != &m0 {
1938 acquirep(gp.m.nextp.ptr())
1939 gp.m.nextp = 0
1940 }
1941 schedule()
1942 }
1943
1944
1945
1946
1947
1948
1949
1950 func mstartm0() {
1951
1952
1953
1954 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1955 cgoHasExtraM = true
1956 newextram()
1957 }
1958 initsig(false)
1959 }
1960
1961
1962
1963
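// mPark causes a thread to park itself, returning once woken.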
1964 func mPark() {
1965 gp := getg()
1966 notesleep(&gp.m.park)
1967 noteclear(&gp.m.park)
1968 }
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
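// mexit tears down and exits the current thread. osStack reports whether the
// thread's stack was allocated by the operating system and so should not be
// freed by the runtime.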
1980 func mexit(osStack bool) {
1981 mp := getg().m
1982
1983 if mp == &m0 {
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995 handoffp(releasep())
1996 lock(&sched.lock)
1997 sched.nmfreed++
1998 checkdead()
1999 unlock(&sched.lock)
2000 mPark()
2001 throw("locked m0 woke up")
2002 }
2003
2004 sigblock(true)
2005 unminit()
2006
2007
2008 if mp.gsignal != nil {
2009 stackfree(mp.gsignal.stack)
2010 if valgrindenabled {
2011 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2012 mp.gsignal.valgrindStackID = 0
2013 }
2014
2015
2016
2017
2018 mp.gsignal = nil
2019 }
2020
2021
2022 vgetrandomDestroy(mp)
2023
2024
2025
2026 mp.self.clear()
2027
2028
2029 lock(&sched.lock)
2030 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2031 if *pprev == mp {
2032 *pprev = mp.alllink
2033 goto found
2034 }
2035 }
2036 throw("m not found in allm")
2037 found:
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052 mp.freeWait.Store(freeMWait)
2053 mp.freelink = sched.freem
2054 sched.freem = mp
2055 unlock(&sched.lock)
2056
2057 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2058 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2059
2060
2061 handoffp(releasep())
2062
2063
2064
2065
2066
2067 lock(&sched.lock)
2068 sched.nmfreed++
2069 checkdead()
2070 unlock(&sched.lock)
2071
2072 if GOOS == "darwin" || GOOS == "ios" {
2073
2074
2075 if mp.signalPending.Load() != 0 {
2076 pendingPreemptSignals.Add(-1)
2077 }
2078 }
2079
2080
2081
2082 mdestroy(mp)
2083
2084 if osStack {
2085
2086 mp.freeWait.Store(freeMRef)
2087
2088
2089
2090 return
2091 }
2092
2093
2094
2095
2096
2097 exitThread(&mp.freeWait)
2098 }
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
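// forEachP calls fn(p) for every P when that P reaches a GC safe point. If a
// P is currently executing code, it is brought to a safe point and fn runs on
// that P; if the P is idle or in a syscall, fn is called directly while the P
// is prevented from changing state.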
2110 func forEachP(reason waitReason, fn func(*p)) {
2111 systemstack(func() {
2112 gp := getg().m.curg
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124 casGToWaitingForSuspendG(gp, _Grunning, reason)
2125 forEachPInternal(fn)
2126 casgstatus(gp, _Gwaiting, _Grunning)
2127 })
2128 }
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139 func forEachPInternal(fn func(*p)) {
2140 mp := acquirem()
2141 pp := getg().m.p.ptr()
2142
2143 lock(&sched.lock)
2144 if sched.safePointWait != 0 {
2145 throw("forEachP: sched.safePointWait != 0")
2146 }
2147 sched.safePointWait = gomaxprocs - 1
2148 sched.safePointFn = fn
2149
2150
2151 for _, p2 := range allp {
2152 if p2 != pp {
2153 atomic.Store(&p2.runSafePointFn, 1)
2154 }
2155 }
2156 preemptall()
2157
2158
2159
2160
2161
2162
2163
2164 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2165 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2166 fn(p)
2167 sched.safePointWait--
2168 }
2169 }
2170
2171 wait := sched.safePointWait > 0
2172 unlock(&sched.lock)
2173
2174
2175 fn(pp)
2176
2177
2178
2179 for _, p2 := range allp {
2180 if atomic.Load(&p2.runSafePointFn) != 1 {
2181
2182 continue
2183 }
2184 if thread, ok := setBlockOnExitSyscall(p2); ok {
2185 thread.takeP()
2186 thread.resume()
2187 handoffp(p2)
2188 }
2189 }
2190
2191
2192 if wait {
2193 for {
2194
2195
2196
2197
2198 if notetsleep(&sched.safePointNote, 100*1000) {
2199 noteclear(&sched.safePointNote)
2200 break
2201 }
2202 preemptall()
2203 }
2204 }
2205 if sched.safePointWait != 0 {
2206 throw("forEachP: not done")
2207 }
2208 for _, p2 := range allp {
2209 if p2.runSafePointFn != 0 {
2210 throw("forEachP: P did not run fn")
2211 }
2212 }
2213
2214 lock(&sched.lock)
2215 sched.safePointFn = nil
2216 unlock(&sched.lock)
2217 releasem(mp)
2218 }
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231 func runSafePointFn() {
2232 p := getg().m.p.ptr()
2233
2234
2235
2236 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2237 return
2238 }
2239 sched.safePointFn(p)
2240 lock(&sched.lock)
2241 sched.safePointWait--
2242 if sched.safePointWait == 0 {
2243 notewakeup(&sched.safePointNote)
2244 }
2245 unlock(&sched.lock)
2246 }
2247
2248
2249
2250
2251 var cgoThreadStart unsafe.Pointer
2252
2253 type cgothreadstart struct {
2254 g guintptr
2255 tls *uint64
2256 fn unsafe.Pointer
2257 }
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
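// allocm allocates a new m unassociated with any thread, using pp for the
// allocation context if needed. fn is recorded as the new m's mstartfn, and
// id is an optional pre-allocated m ID (pass -1 to have one reserved).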
2268 func allocm(pp *p, fn func(), id int64) *m {
2269 allocmLock.rlock()
2270
2271
2272
2273
2274 acquirem()
2275
2276 gp := getg()
2277 if gp.m.p == 0 {
2278 acquirep(pp)
2279 }
2280
2281
2282
2283 if sched.freem != nil {
2284 lock(&sched.lock)
2285 var newList *m
2286 for freem := sched.freem; freem != nil; {
2287
2288 wait := freem.freeWait.Load()
2289 if wait == freeMWait {
2290 next := freem.freelink
2291 freem.freelink = newList
2292 newList = freem
2293 freem = next
2294 continue
2295 }
2296
2297
2298
2299 if traceEnabled() || traceShuttingDown() {
2300 traceThreadDestroy(freem)
2301 }
2302
2303
2304
2305 if wait == freeMStack {
2306
2307
2308
2309 systemstack(func() {
2310 stackfree(freem.g0.stack)
2311 if valgrindenabled {
2312 valgrindDeregisterStack(freem.g0.valgrindStackID)
2313 freem.g0.valgrindStackID = 0
2314 }
2315 })
2316 }
2317 freem = freem.freelink
2318 }
2319 sched.freem = newList
2320 unlock(&sched.lock)
2321 }
2322
2323 mp := &new(mPadded).m
2324 mp.mstartfn = fn
2325 mcommoninit(mp, id)
2326
2327
2328
2329 if iscgo || mStackIsSystemAllocated() {
2330 mp.g0 = malg(-1)
2331 } else {
2332 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2333 }
2334 mp.g0.m = mp
2335
2336 if pp == gp.m.p.ptr() {
2337 releasep()
2338 }
2339
2340 releasem(gp.m)
2341 allocmLock.runlock()
2342 return mp
2343 }
// needm is called when a cgo callback happens on a thread that has no m (a
// thread not created by Go). It borrows an m from the extra-M list for the
// duration of the callback. The signal argument reports whether the callback
// is running in a signal handler, in which case only a minimal setup is
// performed. When the callback finishes, dropm returns the m to the extra
// list, unless the thread has been bound to it (see cgoBindM).
2384 func needm(signal bool) {
2385 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2386
2387
2388
2389
2390
2391
2392 writeErrStr("fatal error: cgo callback before cgo call\n")
2393 exit(1)
2394 }
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404 var sigmask sigset
2405 sigsave(&sigmask)
2406 sigblock(false)
2407
2408
2409
2410
2411 mp, last := getExtraM()
2412
2413
2414
2415
2416
2417
2418
2419
2420 mp.needextram = last
2421
2422
2423 mp.sigmask = sigmask
2424
2425
2426
2427 osSetupTLS(mp)
2428
2429
2430
2431 setg(mp.g0)
2432 sp := sys.GetCallerSP()
2433 callbackUpdateSystemStack(mp, sp, signal)
2434
2435
2436
2437
2438 mp.isExtraInC = false
2439
2440
2441 asminit()
2442 minit()
2443
2444
2445
2446
2447
2448
2449 var trace traceLocker
2450 if !signal {
2451 trace = traceAcquire()
2452 }
2453
2454
2455 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2456 sched.ngsys.Add(-1)
2457 sched.nGsyscallNoP.Add(1)
2458
2459 if !signal {
2460 if trace.ok() {
2461 trace.GoCreateSyscall(mp.curg)
2462 traceRelease(trace)
2463 }
2464 }
2465 mp.isExtraInSig = signal
2466 }
2467
2468
2469
2470
2471 func needAndBindM() {
2472 needm(false)
2473
2474 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2475 cgoBindM()
2476 }
2477 }
2478
2479
2480
2481
2482 func newextram() {
2483 c := extraMWaiters.Swap(0)
2484 if c > 0 {
2485 for i := uint32(0); i < c; i++ {
2486 oneNewExtraM()
2487 }
2488 } else if extraMLength.Load() == 0 {
2489
2490 oneNewExtraM()
2491 }
2492 }
2493
2494
2495 func oneNewExtraM() {
2496
2497
2498
2499
2500
2501 mp := allocm(nil, nil, -1)
2502 gp := malg(4096)
2503 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2504 gp.sched.sp = gp.stack.hi
2505 gp.sched.sp -= 4 * goarch.PtrSize
2506 gp.sched.lr = 0
2507 gp.sched.g = guintptr(unsafe.Pointer(gp))
2508 gp.syscallpc = gp.sched.pc
2509 gp.syscallsp = gp.sched.sp
2510 gp.stktopsp = gp.sched.sp
2511
2512
2513
2514 casgstatus(gp, _Gidle, _Gdeadextra)
2515 gp.m = mp
2516 mp.curg = gp
2517 mp.isextra = true
2518
2519 mp.isExtraInC = true
2520 mp.lockedInt++
2521 mp.lockedg.set(gp)
2522 gp.lockedm.set(mp)
2523 gp.goid = sched.goidgen.Add(1)
2524 if raceenabled {
2525 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2526 }
2527
2528 allgadd(gp)
2529
2530
2531
2532
2533
2534 sched.ngsys.Add(1)
2535
2536
2537 addExtraM(mp)
2538 }
2539
// dropm undoes the effect of needm: it puts the current extra m back on the
// extra-M list so another C thread calling into Go can reuse it. On platforms
// without pthreads this happens after every cgo callback; on pthread
// platforms it is deferred to thread exit via a pthread key destructor so
// that repeated callbacks on the same thread can keep the m (see cgoBindM).
2573 func dropm() {
2574
2575
2576
2577 mp := getg().m
2578
2579
2580
2581
2582
2583 var trace traceLocker
2584 if !mp.isExtraInSig {
2585 trace = traceAcquire()
2586 }
2587
2588
2589 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2590 mp.curg.preemptStop = false
2591 sched.ngsys.Add(1)
2592 sched.nGsyscallNoP.Add(-1)
2593
2594 if !mp.isExtraInSig {
2595 if trace.ok() {
2596 trace.GoDestroySyscall()
2597 traceRelease(trace)
2598 }
2599 }
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614 mp.syscalltick--
2615
2616
2617
2618 mp.curg.trace.reset()
2619
2620
2621
2622
2623 if traceEnabled() || traceShuttingDown() {
2624
2625
2626
2627
2628
2629
2630
2631 lock(&sched.lock)
2632 traceThreadDestroy(mp)
2633 unlock(&sched.lock)
2634 }
2635 mp.isExtraInSig = false
2636
2637
2638
2639
2640
2641 sigmask := mp.sigmask
2642 sigblock(false)
2643 unminit()
2644
2645 setg(nil)
2646
2647
2648
2649 g0 := mp.g0
2650 g0.stack.hi = 0
2651 g0.stack.lo = 0
2652 g0.stackguard0 = 0
2653 g0.stackguard1 = 0
2654 mp.g0StackAccurate = false
2655
2656 putExtraM(mp)
2657
2658 msigrestore(sigmask)
2659 }
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681 func cgoBindM() {
2682 if GOOS == "windows" || GOOS == "plan9" {
2683 fatal("bindm in unexpected GOOS")
2684 }
2685 g := getg()
2686 if g.m.g0 != g {
2687 fatal("the current g is not g0")
2688 }
2689 if _cgo_bindm != nil {
2690 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2691 }
2692 }
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705 func getm() uintptr {
2706 return uintptr(unsafe.Pointer(getg().m))
2707 }
2708
2709 var (
2710
2711
2712
2713
2714
2715
2716 extraM atomic.Uintptr
2717
2718 extraMLength atomic.Uint32
2719
2720 extraMWaiters atomic.Uint32
2721
2722
2723 extraMInUse atomic.Uint32
2724 )
2725
2726
2727
2728
2729
2730
2731
2732
2733 func lockextra(nilokay bool) *m {
2734 const locked = 1
2735
2736 incr := false
2737 for {
2738 old := extraM.Load()
2739 if old == locked {
2740 osyield_no_g()
2741 continue
2742 }
2743 if old == 0 && !nilokay {
2744 if !incr {
2745
2746
2747
2748 extraMWaiters.Add(1)
2749 incr = true
2750 }
2751 usleep_no_g(1)
2752 continue
2753 }
2754 if extraM.CompareAndSwap(old, locked) {
2755 return (*m)(unsafe.Pointer(old))
2756 }
2757 osyield_no_g()
2758 continue
2759 }
2760 }
2761
2762
2763 func unlockextra(mp *m, delta int32) {
2764 extraMLength.Add(delta)
2765 extraM.Store(uintptr(unsafe.Pointer(mp)))
2766 }
2767
2768
2769
2770
2771
2772
2773
2774
2775 func getExtraM() (mp *m, last bool) {
2776 mp = lockextra(false)
2777 extraMInUse.Add(1)
2778 unlockextra(mp.schedlink.ptr(), -1)
2779 return mp, mp.schedlink.ptr() == nil
2780 }
2781
2782
2783
2784
2785
2786 func putExtraM(mp *m) {
2787 extraMInUse.Add(-1)
2788 addExtraM(mp)
2789 }
2790
2791
2792
2793
2794 func addExtraM(mp *m) {
2795 mnext := lockextra(true)
2796 mp.schedlink.set(mnext)
2797 unlockextra(mp, 1)
2798 }
2799
2800 var (
2801
2802
2803
2804 allocmLock rwmutex
2805
2806
2807
2808
2809 execLock rwmutex
2810 )
2811
2812
2813
2814 const (
2815 failthreadcreate = "runtime: failed to create new OS thread\n"
2816 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2817 )
2818
2819
2820
2821
2822 var newmHandoff struct {
2823 lock mutex
2824
2825
2826
2827 newm muintptr
2828
2829
2830
2831 waiting bool
2832 wake note
2833
2834
2835
2836
2837 haveTemplateThread uint32
2838 }
2839
2840
2841
2842
2843
2844
2845
2846
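// newm creates a new m. It will start off with a call to fn, or the scheduler
// if fn is nil. fn must be static and not a heap-allocated closure. id is an
// optional pre-allocated m ID; pass -1 to have one reserved.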
2847 func newm(fn func(), pp *p, id int64) {
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858 acquirem()
2859
2860 mp := allocm(pp, fn, id)
2861 mp.nextp.set(pp)
2862 mp.sigmask = initSigmask
2863 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875 lock(&newmHandoff.lock)
2876 if newmHandoff.haveTemplateThread == 0 {
2877 throw("on a locked thread with no template thread")
2878 }
2879 mp.schedlink = newmHandoff.newm
2880 newmHandoff.newm.set(mp)
2881 if newmHandoff.waiting {
2882 newmHandoff.waiting = false
2883 notewakeup(&newmHandoff.wake)
2884 }
2885 unlock(&newmHandoff.lock)
2886
2887
2888
2889 releasem(getg().m)
2890 return
2891 }
2892 newm1(mp)
2893 releasem(getg().m)
2894 }
2895
2896 func newm1(mp *m) {
2897 if iscgo {
2898 var ts cgothreadstart
2899 if _cgo_thread_start == nil {
2900 throw("_cgo_thread_start missing")
2901 }
2902 ts.g.set(mp.g0)
2903 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2904 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2905 if msanenabled {
2906 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2907 }
2908 if asanenabled {
2909 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2910 }
2911 execLock.rlock()
2912 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2913 execLock.runlock()
2914 return
2915 }
2916 execLock.rlock()
2917 newosproc(mp)
2918 execLock.runlock()
2919 }
2920
2921
2922
2923
2924
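// startTemplateThread starts the template thread if it is not already
// running. The calling thread must itself be in a known-good state.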
2925 func startTemplateThread() {
2926 if GOARCH == "wasm" {
2927 return
2928 }
2929
2930
2931
2932 mp := acquirem()
2933 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2934 releasem(mp)
2935 return
2936 }
2937 newm(templateThread, nil, -1)
2938 releasem(mp)
2939 }
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
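// templateThread is a thread in a known-good state that exists solely to
// start new threads in known-good states when the calling thread may not be
// in a good state (for example, when it is locked to an OS thread with a
// modified signal mask or other attributes). It is started lazily, since many
// programs never need it.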
2953 func templateThread() {
2954 lock(&sched.lock)
2955 sched.nmsys++
2956 checkdead()
2957 unlock(&sched.lock)
2958
2959 for {
2960 lock(&newmHandoff.lock)
2961 for newmHandoff.newm != 0 {
2962 newm := newmHandoff.newm.ptr()
2963 newmHandoff.newm = 0
2964 unlock(&newmHandoff.lock)
2965 for newm != nil {
2966 next := newm.schedlink.ptr()
2967 newm.schedlink = 0
2968 newm1(newm)
2969 newm = next
2970 }
2971 lock(&newmHandoff.lock)
2972 }
2973 newmHandoff.waiting = true
2974 noteclear(&newmHandoff.wake)
2975 unlock(&newmHandoff.lock)
2976 notesleep(&newmHandoff.wake)
2977 }
2978 }
2979
2980
2981
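// stopm stops execution of the current m until new work is available.
// It returns with an acquired P.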
2982 func stopm() {
2983 gp := getg()
2984
2985 if gp.m.locks != 0 {
2986 throw("stopm holding locks")
2987 }
2988 if gp.m.p != 0 {
2989 throw("stopm holding p")
2990 }
2991 if gp.m.spinning {
2992 throw("stopm spinning")
2993 }
2994
2995 lock(&sched.lock)
2996 mput(gp.m)
2997 unlock(&sched.lock)
2998 mPark()
2999 acquirep(gp.m.nextp.ptr())
3000 gp.m.nextp = 0
3001 }
3002
3003 func mspinning() {
3004
3005 getg().m.spinning = true
3006 }
3007
// startm schedules some M to run the P (creating an M if necessary).
// If pp == nil, it tries to get an idle P; if there is none it does nothing.
// May run with m.p == nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide
// a P; startm transfers the spinning responsibility to the started M.
// Callers passing lockheld == true must hold sched.lock.
3025 func startm(pp *p, spinning, lockheld bool) {
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042 mp := acquirem()
3043 if !lockheld {
3044 lock(&sched.lock)
3045 }
3046 if pp == nil {
3047 if spinning {
3048
3049
3050
3051 throw("startm: P required for spinning=true")
3052 }
3053 pp, _ = pidleget(0)
3054 if pp == nil {
3055 if !lockheld {
3056 unlock(&sched.lock)
3057 }
3058 releasem(mp)
3059 return
3060 }
3061 }
3062 nmp := mget()
3063 if nmp == nil {
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078 id := mReserveID()
3079 unlock(&sched.lock)
3080
3081 var fn func()
3082 if spinning {
3083
3084 fn = mspinning
3085 }
3086 newm(fn, pp, id)
3087
3088 if lockheld {
3089 lock(&sched.lock)
3090 }
3091
3092
3093 releasem(mp)
3094 return
3095 }
3096 if !lockheld {
3097 unlock(&sched.lock)
3098 }
3099 if nmp.spinning {
3100 throw("startm: m is spinning")
3101 }
3102 if nmp.nextp != 0 {
3103 throw("startm: m has p")
3104 }
3105 if spinning && !runqempty(pp) {
3106 throw("startm: p has runnable gs")
3107 }
3108
3109 nmp.spinning = spinning
3110 nmp.nextp.set(pp)
3111 notewakeup(&nmp.park)
3112
3113
3114 releasem(mp)
3115 }
3116
// handoffp hands off P from a syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
3121 func handoffp(pp *p) {
3122
3123
3124
3125
3126 if !runqempty(pp) || !sched.runq.empty() {
3127 startm(pp, false, false)
3128 return
3129 }
3130
3131 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3132 startm(pp, false, false)
3133 return
3134 }
3135
3136 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3137 startm(pp, false, false)
3138 return
3139 }
3140
3141
3142 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3143 sched.needspinning.Store(0)
3144 startm(pp, true, false)
3145 return
3146 }
3147 lock(&sched.lock)
3148 if sched.gcwaiting.Load() {
3149 pp.status = _Pgcstop
3150 pp.gcStopTime = nanotime()
3151 sched.stopwait--
3152 if sched.stopwait == 0 {
3153 notewakeup(&sched.stopnote)
3154 }
3155 unlock(&sched.lock)
3156 return
3157 }
3158 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3159 sched.safePointFn(pp)
3160 sched.safePointWait--
3161 if sched.safePointWait == 0 {
3162 notewakeup(&sched.safePointNote)
3163 }
3164 }
3165 if !sched.runq.empty() {
3166 unlock(&sched.lock)
3167 startm(pp, false, false)
3168 return
3169 }
3170
3171
3172 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3173 unlock(&sched.lock)
3174 startm(pp, false, false)
3175 return
3176 }
3177
3178
3179
3180 when := pp.timers.wakeTime()
3181 pidleput(pp, 0)
3182 unlock(&sched.lock)
3183
3184 if when != 0 {
3185 wakeNetPoller(when)
3186 }
3187 }
3188
// wakep tries to add one more P to execute G's. It is called when a G is made
// runnable (newproc, ready). It only wakes an idle P if there is no other
// spinning M, using the spinning state to throttle redundant wakeups.
3202 func wakep() {
3203
3204
3205 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3206 return
3207 }
3208
3209
3210
3211
3212
3213
3214 mp := acquirem()
3215
3216 var pp *p
3217 lock(&sched.lock)
3218 pp, _ = pidlegetSpinning(0)
3219 if pp == nil {
3220 if sched.nmspinning.Add(-1) < 0 {
3221 throw("wakep: negative nmspinning")
3222 }
3223 unlock(&sched.lock)
3224 releasem(mp)
3225 return
3226 }
3227
3228
3229
3230
3231 unlock(&sched.lock)
3232
3233 startm(pp, true, false)
3234
3235 releasem(mp)
3236 }
3237
3238
3239
3240 func stoplockedm() {
3241 gp := getg()
3242
3243 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3244 throw("stoplockedm: inconsistent locking")
3245 }
3246 if gp.m.p != 0 {
3247
3248 pp := releasep()
3249 handoffp(pp)
3250 }
3251 incidlelocked(1)
3252
3253 mPark()
3254 status := readgstatus(gp.m.lockedg.ptr())
3255 if status&^_Gscan != _Grunnable {
3256 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3257 dumpgstatus(gp.m.lockedg.ptr())
3258 throw("stoplockedm: not runnable")
3259 }
3260 acquirep(gp.m.nextp.ptr())
3261 gp.m.nextp = 0
3262 }
3263
3264
3265
3266
3267
3268 func startlockedm(gp *g) {
3269 mp := gp.lockedm.ptr()
3270 if mp == getg().m {
3271 throw("startlockedm: locked to me")
3272 }
3273 if mp.nextp != 0 {
3274 throw("startlockedm: m has p")
3275 }
3276
3277 incidlelocked(-1)
3278 pp := releasep()
3279 mp.nextp.set(pp)
3280 notewakeup(&mp.park)
3281 stopm()
3282 }
3283
3284
3285
3286 func gcstopm() {
3287 gp := getg()
3288
3289 if !sched.gcwaiting.Load() {
3290 throw("gcstopm: not waiting for gc")
3291 }
3292 if gp.m.spinning {
3293 gp.m.spinning = false
3294
3295
3296 if sched.nmspinning.Add(-1) < 0 {
3297 throw("gcstopm: negative nmspinning")
3298 }
3299 }
3300 pp := releasep()
3301 lock(&sched.lock)
3302 pp.status = _Pgcstop
3303 pp.gcStopTime = nanotime()
3304 sched.stopwait--
3305 if sched.stopwait == 0 {
3306 notewakeup(&sched.stopnote)
3307 }
3308 unlock(&sched.lock)
3309 stopm()
3310 }
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
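// execute schedules gp to run on the current M. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts
// a new time slice. It never returns; it transfers control to gp via gogo.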
3321 func execute(gp *g, inheritTime bool) {
3322 mp := getg().m
3323
3324 if goroutineProfile.active {
3325
3326
3327
3328 tryRecordGoroutineProfile(gp, nil, osyield)
3329 }
3330
3331
3332 mp.curg = gp
3333 gp.m = mp
3334 gp.syncSafePoint = false
3335 casgstatus(gp, _Grunnable, _Grunning)
3336 gp.waitsince = 0
3337 gp.preempt = false
3338 gp.stackguard0 = gp.stack.lo + stackGuard
3339 if !inheritTime {
3340 mp.p.ptr().schedtick++
3341 }
3342
3343
3344 hz := sched.profilehz
3345 if mp.profilehz != hz {
3346 setThreadCPUProfiler(hz)
3347 }
3348
3349 trace := traceAcquire()
3350 if trace.ok() {
3351 trace.GoStart()
3352 traceRelease(trace)
3353 }
3354
3355 gogo(&gp.sched)
3356 }
3357
// findRunnable finds a runnable goroutine to execute. It tries the local and
// global run queues, timers, the network poller, and work stealing from other
// P's before parking the M. tryWakeP indicates that the returned goroutine is
// not "normal" (e.g. a GC worker or trace reader), so the caller should wake
// a P to run ordinary work.
3362 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3363 mp := getg().m
3364
3365
3366
3367
3368
3369 top:
3370
3371
3372
3373 mp.clearAllpSnapshot()
3374
3375 pp := mp.p.ptr()
3376 if sched.gcwaiting.Load() {
3377 gcstopm()
3378 goto top
3379 }
3380 if pp.runSafePointFn != 0 {
3381 runSafePointFn()
3382 }
3383
3384
3385
3386
3387
3388 now, pollUntil, _ := pp.timers.check(0, nil)
3389
3390
3391 if traceEnabled() || traceShuttingDown() {
3392 gp := traceReader()
3393 if gp != nil {
3394 trace := traceAcquire()
3395 casgstatus(gp, _Gwaiting, _Grunnable)
3396 if trace.ok() {
3397 trace.GoUnpark(gp, 0)
3398 traceRelease(trace)
3399 }
3400 return gp, false, true
3401 }
3402 }
3403
3404
3405 if gcBlackenEnabled != 0 {
3406 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3407 if gp != nil {
3408 return gp, false, true
3409 }
3410 now = tnow
3411 }
3412
3413
3414
3415
3416 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3417 lock(&sched.lock)
3418 gp := globrunqget()
3419 unlock(&sched.lock)
3420 if gp != nil {
3421 return gp, false, false
3422 }
3423 }
3424
3425
3426 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3427 if gp := wakefing(); gp != nil {
3428 ready(gp, 0, true)
3429 }
3430 }
3431
3432
3433 if gcCleanups.needsWake() {
3434 gcCleanups.wake()
3435 }
3436
3437 if *cgo_yield != nil {
3438 asmcgocall(*cgo_yield, nil)
3439 }
3440
3441
3442 if gp, inheritTime := runqget(pp); gp != nil {
3443 return gp, inheritTime, false
3444 }
3445
3446
3447 if !sched.runq.empty() {
3448 lock(&sched.lock)
3449 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3450 unlock(&sched.lock)
3451 if gp != nil {
3452 if runqputbatch(pp, &q); !q.empty() {
3453 throw("Couldn't put Gs into empty local runq")
3454 }
3455 return gp, false, false
3456 }
3457 }
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3469 list, delta := netpoll(0)
3470 sched.pollingNet.Store(0)
3471 if !list.empty() {
3472 gp := list.pop()
3473 injectglist(&list)
3474 netpollAdjustWaiters(delta)
3475 trace := traceAcquire()
3476 casgstatus(gp, _Gwaiting, _Grunnable)
3477 if trace.ok() {
3478 trace.GoUnpark(gp, 0)
3479 traceRelease(trace)
3480 }
3481 return gp, false, false
3482 }
3483 }
3484
3485
3486
3487
3488
3489
3490 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3491 if !mp.spinning {
3492 mp.becomeSpinning()
3493 }
3494
3495 gp, inheritTime, tnow, w, newWork := stealWork(now)
3496 if gp != nil {
3497
3498 return gp, inheritTime, false
3499 }
3500 if newWork {
3501
3502
3503 goto top
3504 }
3505
3506 now = tnow
3507 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3508
3509 pollUntil = w
3510 }
3511 }
3512
3513
3514
3515
3516
3517 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3518 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3519 if node != nil {
3520 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3521 gp := node.gp.ptr()
3522
3523 trace := traceAcquire()
3524 casgstatus(gp, _Gwaiting, _Grunnable)
3525 if trace.ok() {
3526 trace.GoUnpark(gp, 0)
3527 traceRelease(trace)
3528 }
3529 return gp, false, false
3530 }
3531 gcController.removeIdleMarkWorker()
3532 }
3533
3534
3535
3536
3537
3538 gp, otherReady := beforeIdle(now, pollUntil)
3539 if gp != nil {
3540 trace := traceAcquire()
3541 casgstatus(gp, _Gwaiting, _Grunnable)
3542 if trace.ok() {
3543 trace.GoUnpark(gp, 0)
3544 traceRelease(trace)
3545 }
3546 return gp, false, false
3547 }
3548 if otherReady {
3549 goto top
3550 }
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560 allpSnapshot := mp.snapshotAllp()
3561
3562
3563 idlepMaskSnapshot := idlepMask
3564 timerpMaskSnapshot := timerpMask
3565
3566
3567 lock(&sched.lock)
3568 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3569 unlock(&sched.lock)
3570 goto top
3571 }
3572 if !sched.runq.empty() {
3573 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3574 unlock(&sched.lock)
3575 if gp == nil {
3576 throw("global runq empty with non-zero runqsize")
3577 }
3578 if runqputbatch(pp, &q); !q.empty() {
3579 throw("Couldn't put Gs into empty local runq")
3580 }
3581 return gp, false, false
3582 }
3583 if !mp.spinning && sched.needspinning.Load() == 1 {
3584
3585 mp.becomeSpinning()
3586 unlock(&sched.lock)
3587 goto top
3588 }
3589 if releasep() != pp {
3590 throw("findRunnable: wrong p")
3591 }
3592 now = pidleput(pp, now)
3593 unlock(&sched.lock)
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631 wasSpinning := mp.spinning
3632 if mp.spinning {
3633 mp.spinning = false
3634 if sched.nmspinning.Add(-1) < 0 {
3635 throw("findRunnable: negative nmspinning")
3636 }
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649 lock(&sched.lock)
3650 if !sched.runq.empty() {
3651 pp, _ := pidlegetSpinning(0)
3652 if pp != nil {
3653 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3654 unlock(&sched.lock)
3655 if gp == nil {
3656 throw("global runq empty with non-zero runqsize")
3657 }
3658 if runqputbatch(pp, &q); !q.empty() {
3659 throw("Couldn't put Gs into empty local runq")
3660 }
3661 acquirep(pp)
3662 mp.becomeSpinning()
3663 return gp, false, false
3664 }
3665 }
3666 unlock(&sched.lock)
3667
3668 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3669 if pp != nil {
3670 acquirep(pp)
3671 mp.becomeSpinning()
3672 goto top
3673 }
3674
3675
3676 pp, gp := checkIdleGCNoP()
3677 if pp != nil {
3678 acquirep(pp)
3679 mp.becomeSpinning()
3680
3681
3682 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3683 trace := traceAcquire()
3684 casgstatus(gp, _Gwaiting, _Grunnable)
3685 if trace.ok() {
3686 trace.GoUnpark(gp, 0)
3687 traceRelease(trace)
3688 }
3689 return gp, false, false
3690 }
3691
3692
3693
3694
3695
3696
3697
3698 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3699 }
3700
3701
3702
3703
3704
3705 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3706 sched.pollUntil.Store(pollUntil)
3707 if mp.p != 0 {
3708 throw("findRunnable: netpoll with p")
3709 }
3710 if mp.spinning {
3711 throw("findRunnable: netpoll with spinning")
3712 }
3713 delay := int64(-1)
3714 if pollUntil != 0 {
3715 if now == 0 {
3716 now = nanotime()
3717 }
3718 delay = pollUntil - now
3719 if delay < 0 {
3720 delay = 0
3721 }
3722 }
3723 if faketime != 0 {
3724
3725 delay = 0
3726 }
3727 list, delta := netpoll(delay)
3728
3729 now = nanotime()
3730 sched.pollUntil.Store(0)
3731 sched.lastpoll.Store(now)
3732 if faketime != 0 && list.empty() {
3733
3734
3735 stopm()
3736 goto top
3737 }
3738 lock(&sched.lock)
3739 pp, _ := pidleget(now)
3740 unlock(&sched.lock)
3741 if pp == nil {
3742 injectglist(&list)
3743 netpollAdjustWaiters(delta)
3744 } else {
3745 acquirep(pp)
3746 if !list.empty() {
3747 gp := list.pop()
3748 injectglist(&list)
3749 netpollAdjustWaiters(delta)
3750 trace := traceAcquire()
3751 casgstatus(gp, _Gwaiting, _Grunnable)
3752 if trace.ok() {
3753 trace.GoUnpark(gp, 0)
3754 traceRelease(trace)
3755 }
3756 return gp, false, false
3757 }
3758 if wasSpinning {
3759 mp.becomeSpinning()
3760 }
3761 goto top
3762 }
3763 } else if pollUntil != 0 && netpollinited() {
3764 pollerPollUntil := sched.pollUntil.Load()
3765 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3766 netpollBreak()
3767 }
3768 }
3769 stopm()
3770 goto top
3771 }
3772
3773
3774
3775
3776
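// pollWork reports whether there is non-background work this P could
// be doing: a G in the global or local run queue, or ready network
// pollers. It is a lighter-weight check than a full scheduling pass.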
3777 func pollWork() bool {
3778 if !sched.runq.empty() {
3779 return true
3780 }
3781 p := getg().m.p.ptr()
3782 if !runqempty(p) {
3783 return true
3784 }
3785 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3786 if list, delta := netpoll(0); !list.empty() {
3787 injectglist(&list)
3788 netpollAdjustWaiters(delta)
3789 return true
3790 }
3791 }
3792 return false
3793 }
3794
3795
3796
3797
3798
3799
3800
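// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed
// time or the current time if now was passed as 0.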
3801 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3802 pp := getg().m.p.ptr()
3803
3804 ranTimer := false
3805
3806 const stealTries = 4
3807 for i := 0; i < stealTries; i++ {
3808 stealTimersOrRunNextG := i == stealTries-1
3809
3810 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3811 if sched.gcwaiting.Load() {
3812
3813 return nil, false, now, pollUntil, true
3814 }
3815 p2 := allp[enum.position()]
3816 if pp == p2 {
3817 continue
3818 }
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3834 tnow, w, ran := p2.timers.check(now, nil)
3835 now = tnow
3836 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3837 pollUntil = w
3838 }
3839 if ran {
3840
3841
3842
3843
3844
3845
3846
3847
3848 if gp, inheritTime := runqget(pp); gp != nil {
3849 return gp, inheritTime, now, pollUntil, ranTimer
3850 }
3851 ranTimer = true
3852 }
3853 }
3854
3855
3856 if !idlepMask.read(enum.position()) {
3857 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3858 return gp, false, now, pollUntil, ranTimer
3859 }
3860 }
3861 }
3862 }
3863
3864
3865
3866
3867 return nil, false, now, pollUntil, ranTimer
3868 }
3869
3870
3871
3872
3873
3874
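// Check all Ps for a runnable G to steal.
//
// On entry we have no P. If a G is available, returns a P to acquire;
// the caller then retries from the top of findRunnable.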
3875 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3876 for id, p2 := range allpSnapshot {
3877 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3878 lock(&sched.lock)
3879 pp, _ := pidlegetSpinning(0)
3880 if pp == nil {
3881
3882 unlock(&sched.lock)
3883 return nil
3884 }
3885 unlock(&sched.lock)
3886 return pp
3887 }
3888 }
3889
3890
3891 return nil
3892 }
3893
3894
3895
3896
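// Check all Ps for a timer expiring sooner than pollUntil.
//
// Returns updated pollUntil value.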
3897 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3898 for id, p2 := range allpSnapshot {
3899 if timerpMaskSnapshot.read(uint32(id)) {
3900 w := p2.timers.wakeTime()
3901 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3902 pollUntil = w
3903 }
3904 }
3905 }
3906
3907 return pollUntil
3908 }
3909
3910
3911
3912
3913
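// Check for idle-priority GC work, without a P on entry.
//
// If GC work and an idle P are both available, returns the P and the
// background mark worker G to run on it.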
3914 func checkIdleGCNoP() (*p, *g) {
3915
3916
3917
3918
3919
3920
3921 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3922 return nil, nil
3923 }
3924 if !gcShouldScheduleWorker(nil) {
3925 return nil, nil
3926 }
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945 lock(&sched.lock)
3946 pp, now := pidlegetSpinning(0)
3947 if pp == nil {
3948 unlock(&sched.lock)
3949 return nil, nil
3950 }
3951
3952
3953 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3954 pidleput(pp, now)
3955 unlock(&sched.lock)
3956 return nil, nil
3957 }
3958
3959 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3960 if node == nil {
3961 pidleput(pp, now)
3962 unlock(&sched.lock)
3963 gcController.removeIdleMarkWorker()
3964 return nil, nil
3965 }
3966
3967 unlock(&sched.lock)
3968
3969 return pp, node.gp.ptr()
3970 }
3971
3972
3973
3974
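// wakeNetPoller wakes the thread sleeping in the network poller if it isn't
// going to wake up before the when argument; or it wakes an idle P to service
// timers and the network poller if there isn't one already.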
3975 func wakeNetPoller(when int64) {
3976 if sched.lastpoll.Load() == 0 {
3977
3978
3979
3980
3981 pollerPollUntil := sched.pollUntil.Load()
3982 if pollerPollUntil == 0 || pollerPollUntil > when {
3983 netpollBreak()
3984 }
3985 } else {
3986
3987
3988 if GOOS != "plan9" {
3989 wakep()
3990 }
3991 }
3992 }
3993
3994 func resetspinning() {
3995 gp := getg()
3996 if !gp.m.spinning {
3997 throw("resetspinning: not a spinning m")
3998 }
3999 gp.m.spinning = false
4000 nmspinning := sched.nmspinning.Add(-1)
4001 if nmspinning < 0 {
4002 throw("findRunnable: negative nmspinning")
4003 }
4004
4005
4006
4007 wakep()
4008 }
4009
4010
4011
4012
4013
4014
4015
4016
4017
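// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to npidle Ms are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining Gs are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.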
4018 func injectglist(glist *gList) {
4019 if glist.empty() {
4020 return
4021 }
4022
4023
4024
4025 var tail *g
4026 trace := traceAcquire()
4027 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4028 tail = gp
4029 casgstatus(gp, _Gwaiting, _Grunnable)
4030 if trace.ok() {
4031 trace.GoUnpark(gp, 0)
4032 }
4033 }
4034 if trace.ok() {
4035 traceRelease(trace)
4036 }
4037
4038
4039 q := gQueue{glist.head, tail.guintptr(), glist.size}
4040 *glist = gList{}
4041
4042 startIdle := func(n int32) {
4043 for ; n > 0; n-- {
4044 mp := acquirem()
4045 lock(&sched.lock)
4046
4047 pp, _ := pidlegetSpinning(0)
4048 if pp == nil {
4049 unlock(&sched.lock)
4050 releasem(mp)
4051 break
4052 }
4053
4054 startm(pp, false, true)
4055 unlock(&sched.lock)
4056 releasem(mp)
4057 }
4058 }
4059
4060 pp := getg().m.p.ptr()
4061 if pp == nil {
4062 n := q.size
4063 lock(&sched.lock)
4064 globrunqputbatch(&q)
4065 unlock(&sched.lock)
4066 startIdle(n)
4067 return
4068 }
4069
4070 var globq gQueue
4071 npidle := sched.npidle.Load()
4072 for ; npidle > 0 && !q.empty(); npidle-- {
4073 g := q.pop()
4074 globq.pushBack(g)
4075 }
4076 if !globq.empty() {
4077 n := globq.size
4078 lock(&sched.lock)
4079 globrunqputbatch(&globq)
4080 unlock(&sched.lock)
4081 startIdle(n)
4082 }
4083
4084 if runqputbatch(pp, &q); !q.empty() {
4085 lock(&sched.lock)
4086 globrunqputbatch(&q)
4087 unlock(&sched.lock)
4088 }
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103 wakep()
4104 }
4105
4106
4107
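// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.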
4108 func schedule() {
4109 mp := getg().m
4110
4111 if mp.locks != 0 {
4112 throw("schedule: holding locks")
4113 }
4114
4115 if mp.lockedg != 0 {
4116 stoplockedm()
4117 execute(mp.lockedg.ptr(), false)
4118 }
4119
4120
4121
4122 if mp.incgo {
4123 throw("schedule: in cgo")
4124 }
4125
4126 top:
4127 pp := mp.p.ptr()
4128 pp.preempt = false
4129
4130
4131
4132
4133 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4134 throw("schedule: spinning with local work")
4135 }
4136
4137 gp, inheritTime, tryWakeP := findRunnable()
4138
4139
4140 pp = mp.p.ptr()
4141
4142
4143
4144
4145 mp.clearAllpSnapshot()
4146
4147
4148
4149
4150
4151
4152
4153
4154 gcController.releaseNextGCMarkWorker(pp)
4155
4156 if debug.dontfreezetheworld > 0 && freezing.Load() {
4157
4158
4159
4160
4161
4162
4163
4164 lock(&deadlock)
4165 lock(&deadlock)
4166 }
4167
4168
4169
4170
4171 if mp.spinning {
4172 resetspinning()
4173 }
4174
4175 if sched.disable.user && !schedEnabled(gp) {
4176
4177
4178
4179 lock(&sched.lock)
4180 if schedEnabled(gp) {
4181
4182
4183 unlock(&sched.lock)
4184 } else {
4185 sched.disable.runnable.pushBack(gp)
4186 unlock(&sched.lock)
4187 goto top
4188 }
4189 }
4190
4191
4192
4193 if tryWakeP {
4194 wakep()
4195 }
4196 if gp.lockedm != 0 {
4197
4198
4199 startlockedm(gp)
4200 goto top
4201 }
4202
4203 execute(gp, inheritTime)
4204 }
4205
4206
4207
4208
4209
4210
4211
4212
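// dropg removes the association between m and the current goroutine m->curg
// (gp for short). Typically a caller sets gp's status away from Grunning and
// then immediately calls dropg to finish the job. The caller is also
// responsible for arranging that gp will be restarted using ready at an
// appropriate time.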
4213 func dropg() {
4214 gp := getg()
4215
4216 setMNoWB(&gp.m.curg.m, nil)
4217 setGNoWB(&gp.m.curg, nil)
4218 }
4219
4220 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4221 unlock((*mutex)(lock))
4222 return true
4223 }
4224
4225
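// park continuation on g0.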
4226 func park_m(gp *g) {
4227 mp := getg().m
4228
4229 trace := traceAcquire()
4230
4231
4232
4233
4234
4235 bubble := gp.bubble
4236 if bubble != nil {
4237 bubble.incActive()
4238 }
4239
4240 if trace.ok() {
4241
4242
4243
4244 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4245 }
4246
4247
4248 casgstatus(gp, _Grunning, _Gwaiting)
4249 if trace.ok() {
4250 traceRelease(trace)
4251 }
4252
4253 dropg()
4254
4255 if fn := mp.waitunlockf; fn != nil {
4256 ok := fn(gp, mp.waitlock)
4257 mp.waitunlockf = nil
4258 mp.waitlock = nil
4259 if !ok {
4260 trace := traceAcquire()
4261 casgstatus(gp, _Gwaiting, _Grunnable)
4262 if bubble != nil {
4263 bubble.decActive()
4264 }
4265 if trace.ok() {
4266 trace.GoUnpark(gp, 2)
4267 traceRelease(trace)
4268 }
4269 execute(gp, true)
4270 }
4271 }
4272
4273 if bubble != nil {
4274 bubble.decActive()
4275 }
4276
4277 schedule()
4278 }
4279
4280 func goschedImpl(gp *g, preempted bool) {
4281 pp := gp.m.p.ptr()
4282 trace := traceAcquire()
4283 status := readgstatus(gp)
4284 if status&^_Gscan != _Grunning {
4285 dumpgstatus(gp)
4286 throw("bad g status")
4287 }
4288 if trace.ok() {
4289
4290
4291
4292 if preempted {
4293 trace.GoPreempt()
4294 } else {
4295 trace.GoSched()
4296 }
4297 }
4298 casgstatus(gp, _Grunning, _Grunnable)
4299 if trace.ok() {
4300 traceRelease(trace)
4301 }
4302
4303 dropg()
4304 if preempted && sched.gcwaiting.Load() {
4305
4306
4307 runqput(pp, gp, true)
4308 } else {
4309 lock(&sched.lock)
4310 globrunqput(gp)
4311 unlock(&sched.lock)
4312 }
4313
4314 if mainStarted {
4315 wakep()
4316 }
4317
4318 schedule()
4319 }
4320
4321
4322 func gosched_m(gp *g) {
4323 goschedImpl(gp, false)
4324 }
4325
4326
4327 func goschedguarded_m(gp *g) {
4328 if !canPreemptM(gp.m) {
4329 gogo(&gp.sched)
4330 }
4331 goschedImpl(gp, false)
4332 }
4333
4334 func gopreempt_m(gp *g) {
4335 goschedImpl(gp, true)
4336 }
4337
4338
4339
4340
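// preemptPark parks gp and puts it in _Gpreempted.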
4341 func preemptPark(gp *g) {
4342 status := readgstatus(gp)
4343 if status&^_Gscan != _Grunning {
4344 dumpgstatus(gp)
4345 throw("bad g status")
4346 }
4347
4348 if gp.asyncSafePoint {
4349
4350
4351
4352 f := findfunc(gp.sched.pc)
4353 if !f.valid() {
4354 throw("preempt at unknown pc")
4355 }
4356 if f.flag&abi.FuncFlagSPWrite != 0 {
4357 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4358 throw("preempt SPWRITE")
4359 }
4360 }
4361
4362
4363
4364
4365
4366
4367
4368 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4369 dropg()
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391 trace := traceAcquire()
4392 if trace.ok() {
4393 trace.GoPark(traceBlockPreempted, 0)
4394 }
4395 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4396 if trace.ok() {
4397 traceRelease(trace)
4398 }
4399 schedule()
4400 }
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
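// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq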
4416 func goyield() {
4417 checkTimeouts()
4418 mcall(goyield_m)
4419 }
4420
4421 func goyield_m(gp *g) {
4422 trace := traceAcquire()
4423 pp := gp.m.p.ptr()
4424 if trace.ok() {
4425
4426
4427
4428 trace.GoPreempt()
4429 }
4430 casgstatus(gp, _Grunning, _Grunnable)
4431 if trace.ok() {
4432 traceRelease(trace)
4433 }
4434 dropg()
4435 runqput(pp, gp, false)
4436 schedule()
4437 }
4438
4439
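// Finishes execution of the current goroutine.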
4440 func goexit1() {
4441 if raceenabled {
4442 if gp := getg(); gp.bubble != nil {
4443 racereleasemergeg(gp, gp.bubble.raceaddr())
4444 }
4445 racegoend()
4446 }
4447 trace := traceAcquire()
4448 if trace.ok() {
4449 trace.GoEnd()
4450 traceRelease(trace)
4451 }
4452 mcall(goexit0)
4453 }
4454
4455
4456 func goexit0(gp *g) {
4457 gdestroy(gp)
4458 schedule()
4459 }
4460
4461 func gdestroy(gp *g) {
4462 mp := getg().m
4463 pp := mp.p.ptr()
4464
4465 casgstatus(gp, _Grunning, _Gdead)
4466 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4467 if isSystemGoroutine(gp, false) {
4468 sched.ngsys.Add(-1)
4469 }
4470 gp.m = nil
4471 locked := gp.lockedm != 0
4472 gp.lockedm = 0
4473 mp.lockedg = 0
4474 gp.preemptStop = false
4475 gp.paniconfault = false
4476 gp._defer = nil
4477 gp._panic = nil
4478 gp.writebuf = nil
4479 gp.waitreason = waitReasonZero
4480 gp.param = nil
4481 gp.labels = nil
4482 gp.timer = nil
4483 gp.bubble = nil
4484
4485 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4486
4487
4488
4489 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4490 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4491 gcController.bgScanCredit.Add(scanCredit)
4492 gp.gcAssistBytes = 0
4493 }
4494
4495 dropg()
4496
4497 if GOARCH == "wasm" {
4498 gfput(pp, gp)
4499 return
4500 }
4501
4502 if locked && mp.lockedInt != 0 {
4503 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4504 if mp.isextra {
4505 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4506 }
4507 throw("exited a goroutine internally locked to the OS thread")
4508 }
4509 gfput(pp, gp)
4510 if locked {
4511
4512
4513
4514
4515
4516
4517 if GOOS != "plan9" {
4518 gogo(&mp.g0.sched)
4519 } else {
4520
4521
4522 mp.lockedExt = 0
4523 }
4524 }
4525 }
4526
4527
4528
4529
4530
4531
4532
4533
4534
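// save updates getg().sched to refer to pc, sp, and bp so that a following
// gogo will restore pc/sp/bp.
//
// save must not have write barriers because invoking a write barrier
// can clobber getg().sched.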
4535 func save(pc, sp, bp uintptr) {
4536 gp := getg()
4537
4538 if gp == gp.m.g0 || gp == gp.m.gsignal {
4539
4540
4541
4542
4543
4544 throw("save on system g not allowed")
4545 }
4546
4547 gp.sched.pc = pc
4548 gp.sched.sp = sp
4549 gp.sched.lr = 0
4550 gp.sched.bp = bp
4551
4552
4553
4554 if gp.sched.ctxt != nil {
4555 badctxt()
4556 }
4557 }
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
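// The goroutine gp is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the save must make
// gp.sched refer to the caller's stack, because entersyscall
// is going to return immediately after.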
4583 func reentersyscall(pc, sp, bp uintptr) {
4584 gp := getg()
4585
4586
4587
4588 gp.m.locks++
4589
4590
4591
4592
4593
4594 gp.stackguard0 = stackPreempt
4595 gp.throwsplit = true
4596
4597
4598 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4599
4600 pp := gp.m.p.ptr()
4601 if pp.runSafePointFn != 0 {
4602
4603 systemstack(runSafePointFn)
4604 }
4605 gp.m.oldp.set(pp)
4606
4607
4608 save(pc, sp, bp)
4609 gp.syscallsp = sp
4610 gp.syscallpc = pc
4611 gp.syscallbp = bp
4612
4613
4614 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4615 systemstack(func() {
4616 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4617 throw("entersyscall")
4618 })
4619 }
4620 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4621 systemstack(func() {
4622 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4623 throw("entersyscall")
4624 })
4625 }
4626 trace := traceAcquire()
4627 if trace.ok() {
4628
4629
4630
4631
4632 systemstack(func() {
4633 trace.GoSysCall()
4634 })
4635
4636 save(pc, sp, bp)
4637 }
4638 if sched.gcwaiting.Load() {
4639
4640
4641
4642 systemstack(func() {
4643 entersyscallHandleGCWait(trace)
4644 })
4645
4646 save(pc, sp, bp)
4647 }
4648
4649
4650
4651
4652
4653 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4654 casgstatus(gp, _Grunning, _Gsyscall)
4655 }
4656 if staticLockRanking {
4657
4658 save(pc, sp, bp)
4659 }
4660 if trace.ok() {
4661
4662
4663
4664 traceRelease(trace)
4665 }
4666 if sched.sysmonwait.Load() {
4667 systemstack(entersyscallWakeSysmon)
4668
4669 save(pc, sp, bp)
4670 }
4671 gp.m.locks--
4672 }
4673
4674
4675
4676
4677 const debugExtendGrunningNoP = false
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
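// Standard syscall entry used by the go syscall library and normal cgo calls.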
4693 func entersyscall() {
4694
4695
4696
4697
4698 fp := getcallerfp()
4699 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4700 }
4701
4702 func entersyscallWakeSysmon() {
4703 lock(&sched.lock)
4704 if sched.sysmonwait.Load() {
4705 sched.sysmonwait.Store(false)
4706 notewakeup(&sched.sysmonnote)
4707 }
4708 unlock(&sched.lock)
4709 }
4710
4711 func entersyscallHandleGCWait(trace traceLocker) {
4712 gp := getg()
4713
4714 lock(&sched.lock)
4715 if sched.stopwait > 0 {
4716
4717 pp := gp.m.p.ptr()
4718 pp.m = 0
4719 gp.m.p = 0
4720 atomic.Store(&pp.status, _Pgcstop)
4721
4722 if trace.ok() {
4723 trace.ProcStop(pp)
4724 }
4725 sched.nGsyscallNoP.Add(1)
4726 pp.gcStopTime = nanotime()
4727 pp.syscalltick++
4728 if sched.stopwait--; sched.stopwait == 0 {
4729 notewakeup(&sched.stopnote)
4730 }
4731 }
4732 unlock(&sched.lock)
4733 }
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
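// The same as entersyscall(), but with a hint that the syscall is blocking.
// The P is handed off immediately instead of waiting for sysmon to retake it.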
4747 func entersyscallblock() {
4748 gp := getg()
4749
4750 gp.m.locks++
4751 gp.throwsplit = true
4752 gp.stackguard0 = stackPreempt
4753 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4754 gp.m.p.ptr().syscalltick++
4755
4756 sched.nGsyscallNoP.Add(1)
4757
4758
4759 pc := sys.GetCallerPC()
4760 sp := sys.GetCallerSP()
4761 bp := getcallerfp()
4762 save(pc, sp, bp)
4763 gp.syscallsp = gp.sched.sp
4764 gp.syscallpc = gp.sched.pc
4765 gp.syscallbp = gp.sched.bp
4766 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4767 sp1 := sp
4768 sp2 := gp.sched.sp
4769 sp3 := gp.syscallsp
4770 systemstack(func() {
4771 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4772 throw("entersyscallblock")
4773 })
4774 }
4775
4776
4777
4778
4779
4780
4781 trace := traceAcquire()
4782 systemstack(func() {
4783 if trace.ok() {
4784 trace.GoSysCall()
4785 }
4786 handoffp(releasep())
4787 })
4788
4789
4790
4791 if debugExtendGrunningNoP {
4792 usleep(10)
4793 }
4794 casgstatus(gp, _Grunning, _Gsyscall)
4795 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4796 systemstack(func() {
4797 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4798 throw("entersyscallblock")
4799 })
4800 }
4801 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4802 systemstack(func() {
4803 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4804 throw("entersyscallblock")
4805 })
4806 }
4807 if trace.ok() {
4808 systemstack(func() {
4809 traceRelease(trace)
4810 })
4811 }
4812
4813
4814 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4815
4816 gp.m.locks--
4817 }
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838
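// The goroutine gp exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
//
// Write barriers are not allowed because our P may have been stolen.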
4839 func exitsyscall() {
4840 gp := getg()
4841
4842 gp.m.locks++
4843 if sys.GetCallerSP() > gp.syscallsp {
4844 throw("exitsyscall: syscall frame is no longer valid")
4845 }
4846 gp.waitsince = 0
4847
4848 if sched.stopwait == freezeStopWait {
4849
4850
4851
4852 systemstack(func() {
4853 lock(&deadlock)
4854 lock(&deadlock)
4855 })
4856 }
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4870 casgstatus(gp, _Gsyscall, _Grunning)
4871 }
4872
4873
4874
4875
4876 if debugExtendGrunningNoP {
4877 usleep(10)
4878 }
4879
4880
4881 oldp := gp.m.oldp.ptr()
4882 gp.m.oldp.set(nil)
4883
4884
4885 pp := gp.m.p.ptr()
4886 if pp != nil {
4887
4888 if trace := traceAcquire(); trace.ok() {
4889 systemstack(func() {
4890
4891
4892
4893
4894
4895
4896
4897
4898 if pp.syscalltick == gp.m.syscalltick {
4899 trace.GoSysExit(false)
4900 } else {
4901
4902
4903
4904
4905 trace.ProcSteal(pp)
4906 trace.ProcStart()
4907 trace.GoSysExit(true)
4908 trace.GoStart()
4909 }
4910 traceRelease(trace)
4911 })
4912 }
4913 } else {
4914
4915 systemstack(func() {
4916
4917 if pp := exitsyscallTryGetP(oldp); pp != nil {
4918
4919 acquirepNoTrace(pp)
4920
4921
4922 if trace := traceAcquire(); trace.ok() {
4923 trace.ProcStart()
4924 trace.GoSysExit(true)
4925 trace.GoStart()
4926 traceRelease(trace)
4927 }
4928 }
4929 })
4930 pp = gp.m.p.ptr()
4931 }
4932
4933
4934 if pp != nil {
4935 if goroutineProfile.active {
4936
4937
4938
4939 systemstack(func() {
4940 tryRecordGoroutineProfileWB(gp)
4941 })
4942 }
4943
4944
4945 pp.syscalltick++
4946
4947
4948
4949 gp.syscallsp = 0
4950 gp.m.locks--
4951 if gp.preempt {
4952
4953 gp.stackguard0 = stackPreempt
4954 } else {
4955
4956 gp.stackguard0 = gp.stack.lo + stackGuard
4957 }
4958 gp.throwsplit = false
4959
4960 if sched.disable.user && !schedEnabled(gp) {
4961
4962 Gosched()
4963 }
4964 return
4965 }
4966
4967 gp.m.locks--
4968
4969
4970 mcall(exitsyscallNoP)
4971
4972
4973
4974
4975
4976
4977
4978 gp.syscallsp = 0
4979 gp.m.p.ptr().syscalltick++
4980 gp.throwsplit = false
4981 }
4982
4983
4984
4985
4986
4987
4988
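// exitsyscallTryGetP attempts to find a P for a goroutine exiting a
// syscall: first by re-acquiring the P it entered the syscall with
// (oldp), then by grabbing an idle P. Returns nil if neither is
// available.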
4989 func exitsyscallTryGetP(oldp *p) *p {
4990
4991 if oldp != nil {
4992 if thread, ok := setBlockOnExitSyscall(oldp); ok {
4993 thread.takeP()
4994 thread.resume()
4995 sched.nGsyscallNoP.Add(-1)
4996 return oldp
4997 }
4998 }
4999
5000
5001 if sched.pidle != 0 {
5002 lock(&sched.lock)
5003 pp, _ := pidleget(0)
5004 if pp != nil && sched.sysmonwait.Load() {
5005 sched.sysmonwait.Store(false)
5006 notewakeup(&sched.sysmonnote)
5007 }
5008 unlock(&sched.lock)
5009 if pp != nil {
5010 sched.nGsyscallNoP.Add(-1)
5011 return pp
5012 }
5013 }
5014 return nil
5015 }
5016
5017
5018
5019
5020
5021
5022
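// exitsyscallNoP is the slow path of exitsyscall, called on the g0
// stack when no P could be acquired directly. The goroutine is made
// runnable on the global queue (or run immediately if an idle P turns
// up), and the M blocks or reschedules.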
5023 func exitsyscallNoP(gp *g) {
5024 traceExitingSyscall()
5025 trace := traceAcquire()
5026 casgstatus(gp, _Grunning, _Grunnable)
5027 traceExitedSyscall()
5028 if trace.ok() {
5029
5030
5031
5032
5033 trace.GoSysExit(true)
5034 traceRelease(trace)
5035 }
5036 sched.nGsyscallNoP.Add(-1)
5037 dropg()
5038 lock(&sched.lock)
5039 var pp *p
5040 if schedEnabled(gp) {
5041 pp, _ = pidleget(0)
5042 }
5043 var locked bool
5044 if pp == nil {
5045 globrunqput(gp)
5046
5047
5048
5049
5050
5051
5052 locked = gp.lockedm != 0
5053 } else if sched.sysmonwait.Load() {
5054 sched.sysmonwait.Store(false)
5055 notewakeup(&sched.sysmonnote)
5056 }
5057 unlock(&sched.lock)
5058 if pp != nil {
5059 acquirep(pp)
5060 execute(gp, false)
5061 }
5062 if locked {
5063
5064
5065
5066
5067 stoplockedm()
5068 execute(gp, false)
5069 }
5070 stopm()
5071 schedule()
5072 }
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086 func syscall_runtime_BeforeFork() {
5087 gp := getg().m.curg
5088
5089
5090
5091
5092 gp.m.locks++
5093 sigsave(&gp.m.sigmask)
5094 sigblock(false)
5095
5096
5097
5098
5099
5100 gp.stackguard0 = stackFork
5101 }
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115 func syscall_runtime_AfterFork() {
5116 gp := getg().m.curg
5117
5118
5119 gp.stackguard0 = gp.stack.lo + stackGuard
5120
5121 msigrestore(gp.m.sigmask)
5122
5123 gp.m.locks--
5124 }
5125
5126
5127
5128 var inForkedChild bool
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149 func syscall_runtime_AfterForkInChild() {
5150
5151
5152
5153
5154 inForkedChild = true
5155
5156 clearSignalHandlers()
5157
5158
5159
5160 msigrestore(getg().m.sigmask)
5161
5162 inForkedChild = false
5163 }
5164
5165
5166
5167
5168 var pendingPreemptSignals atomic.Int32
5169
5170
5171
5172
5173 func syscall_runtime_BeforeExec() {
5174
5175 execLock.lock()
5176
5177
5178
5179 if GOOS == "darwin" || GOOS == "ios" {
5180 for pendingPreemptSignals.Load() > 0 {
5181 osyield()
5182 }
5183 }
5184 }
5185
5186
5187
5188
5189 func syscall_runtime_AfterExec() {
5190 execLock.unlock()
5191 }
5192
5193
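// Allocate a new g, with a stack big enough for stacksize bytes.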
5194 func malg(stacksize int32) *g {
5195 newg := new(g)
5196 if stacksize >= 0 {
5197 stacksize = round2(stackSystem + stacksize)
5198 systemstack(func() {
5199 newg.stack = stackalloc(uint32(stacksize))
5200 if valgrindenabled {
5201 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5202 }
5203 })
5204 newg.stackguard0 = newg.stack.lo + stackGuard
5205 newg.stackguard1 = ^uintptr(0)
5206
5207
5208 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5209 }
5210 return newg
5211 }
5212
5213
5214
5215
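// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.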
5216 func newproc(fn *funcval) {
5217 gp := getg()
5218 pc := sys.GetCallerPC()
5219 systemstack(func() {
5220 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5221
5222 pp := getg().m.p.ptr()
5223 runqput(pp, newg, true)
5224
5225 if mainStarted {
5226 wakep()
5227 }
5228 })
5229 }
5230
5231
5232
5233
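// Create a new g in state _Grunnable (or _Gwaiting if parked is true),
// starting at fn. callerpc is the address of the go statement that
// created this. The caller is responsible for adding the new g to the
// scheduler. If parked is true, waitreason must be non-zero.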
5234 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5235 if fn == nil {
5236 fatal("go of nil func value")
5237 }
5238
5239 mp := acquirem()
5240 pp := mp.p.ptr()
5241 newg := gfget(pp)
5242 if newg == nil {
5243 newg = malg(stackMin)
5244 casgstatus(newg, _Gidle, _Gdead)
5245 allgadd(newg)
5246 }
5247 if newg.stack.hi == 0 {
5248 throw("newproc1: newg missing stack")
5249 }
5250
5251 if readgstatus(newg) != _Gdead {
5252 throw("newproc1: new g is not Gdead")
5253 }
5254
5255 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5256 totalSize = alignUp(totalSize, sys.StackAlign)
5257 sp := newg.stack.hi - totalSize
5258 if usesLR {
5259
5260 *(*uintptr)(unsafe.Pointer(sp)) = 0
5261 prepGoExitFrame(sp)
5262 }
5263 if GOARCH == "arm64" {
5264
5265 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5266 }
5267
5268 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5269 newg.sched.sp = sp
5270 newg.stktopsp = sp
5271 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5272 newg.sched.g = guintptr(unsafe.Pointer(newg))
5273 gostartcallfn(&newg.sched, fn)
5274 newg.parentGoid = callergp.goid
5275 newg.gopc = callerpc
5276 newg.ancestors = saveAncestors(callergp)
5277 newg.startpc = fn.fn
5278 newg.runningCleanups.Store(false)
5279 if isSystemGoroutine(newg, false) {
5280 sched.ngsys.Add(1)
5281 } else {
5282
5283 newg.bubble = callergp.bubble
5284 if mp.curg != nil {
5285 newg.labels = mp.curg.labels
5286 }
5287 if goroutineProfile.active {
5288
5289
5290
5291
5292
5293 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5294 }
5295 }
5296
5297 newg.trackingSeq = uint8(cheaprand())
5298 if newg.trackingSeq%gTrackingPeriod == 0 {
5299 newg.tracking = true
5300 }
5301 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5302
5303
5304
5305 trace := traceAcquire()
5306 var status uint32 = _Grunnable
5307 if parked {
5308 status = _Gwaiting
5309 newg.waitreason = waitreason
5310 }
5311 if pp.goidcache == pp.goidcacheend {
5312
5313
5314
5315 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5316 pp.goidcache -= _GoidCacheBatch - 1
5317 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5318 }
5319 newg.goid = pp.goidcache
5320 casgstatus(newg, _Gdead, status)
5321 pp.goidcache++
5322 newg.trace.reset()
5323 if trace.ok() {
5324 trace.GoCreate(newg, newg.startpc, parked)
5325 traceRelease(trace)
5326 }
5327
5328
5329 if raceenabled {
5330 newg.racectx = racegostart(callerpc)
5331 newg.raceignore = 0
5332 if newg.labels != nil {
5333
5334
5335 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5336 }
5337 }
5338 pp.goroutinesCreated++
5339 releasem(mp)
5340
5341 return newg
5342 }
5343
5344
5345
5346
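// saveAncestors copies previous ancestors of the given caller g and
// includes info for the current caller into a new set of tracebacks for
// a g being created.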
5347 func saveAncestors(callergp *g) *[]ancestorInfo {
5348
5349 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5350 return nil
5351 }
5352 var callerAncestors []ancestorInfo
5353 if callergp.ancestors != nil {
5354 callerAncestors = *callergp.ancestors
5355 }
5356 n := int32(len(callerAncestors)) + 1
5357 if n > debug.tracebackancestors {
5358 n = debug.tracebackancestors
5359 }
5360 ancestors := make([]ancestorInfo, n)
5361 copy(ancestors[1:], callerAncestors)
5362
5363 var pcs [tracebackInnerFrames]uintptr
5364 npcs := gcallers(callergp, 0, pcs[:])
5365 ipcs := make([]uintptr, npcs)
5366 copy(ipcs, pcs[:])
5367 ancestors[0] = ancestorInfo{
5368 pcs: ipcs,
5369 goid: callergp.goid,
5370 gopc: callergp.gopc,
5371 }
5372
5373 ancestorsp := new([]ancestorInfo)
5374 *ancestorsp = ancestors
5375 return ancestorsp
5376 }
5377
5378
5379
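// Put on gfree list.
// If local list is too long, transfer a batch to the global list.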
5380 func gfput(pp *p, gp *g) {
5381 if readgstatus(gp) != _Gdead {
5382 throw("gfput: bad status (not Gdead)")
5383 }
5384
5385 stksize := gp.stack.hi - gp.stack.lo
5386
5387 if stksize != uintptr(startingStackSize) {
5388
5389 stackfree(gp.stack)
5390 gp.stack.lo = 0
5391 gp.stack.hi = 0
5392 gp.stackguard0 = 0
5393 if valgrindenabled {
5394 valgrindDeregisterStack(gp.valgrindStackID)
5395 gp.valgrindStackID = 0
5396 }
5397 }
5398
5399 pp.gFree.push(gp)
5400 if pp.gFree.size >= 64 {
5401 var (
5402 stackQ gQueue
5403 noStackQ gQueue
5404 )
5405 for pp.gFree.size >= 32 {
5406 gp := pp.gFree.pop()
5407 if gp.stack.lo == 0 {
5408 noStackQ.push(gp)
5409 } else {
5410 stackQ.push(gp)
5411 }
5412 }
5413 lock(&sched.gFree.lock)
5414 sched.gFree.noStack.pushAll(noStackQ)
5415 sched.gFree.stack.pushAll(stackQ)
5416 unlock(&sched.gFree.lock)
5417 }
5418 }
5419
5420
5421
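// Get from gfree list.
// If local list is empty, grab a batch from global list.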
5422 func gfget(pp *p) *g {
5423 retry:
5424 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5425 lock(&sched.gFree.lock)
5426
5427 for pp.gFree.size < 32 {
5428
5429 gp := sched.gFree.stack.pop()
5430 if gp == nil {
5431 gp = sched.gFree.noStack.pop()
5432 if gp == nil {
5433 break
5434 }
5435 }
5436 pp.gFree.push(gp)
5437 }
5438 unlock(&sched.gFree.lock)
5439 goto retry
5440 }
5441 gp := pp.gFree.pop()
5442 if gp == nil {
5443 return nil
5444 }
5445 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5446
5447
5448
5449 systemstack(func() {
5450 stackfree(gp.stack)
5451 gp.stack.lo = 0
5452 gp.stack.hi = 0
5453 gp.stackguard0 = 0
5454 if valgrindenabled {
5455 valgrindDeregisterStack(gp.valgrindStackID)
5456 gp.valgrindStackID = 0
5457 }
5458 })
5459 }
5460 if gp.stack.lo == 0 {
5461
5462 systemstack(func() {
5463 gp.stack = stackalloc(startingStackSize)
5464 if valgrindenabled {
5465 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5466 }
5467 })
5468 gp.stackguard0 = gp.stack.lo + stackGuard
5469 } else {
5470 if raceenabled {
5471 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5472 }
5473 if msanenabled {
5474 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5475 }
5476 if asanenabled {
5477 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5478 }
5479 }
5480 return gp
5481 }
5482
5483
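// Purge all cached G's from gfree list to the global list.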
5484 func gfpurge(pp *p) {
5485 var (
5486 stackQ gQueue
5487 noStackQ gQueue
5488 )
5489 for !pp.gFree.empty() {
5490 gp := pp.gFree.pop()
5491 if gp.stack.lo == 0 {
5492 noStackQ.push(gp)
5493 } else {
5494 stackQ.push(gp)
5495 }
5496 }
5497 lock(&sched.gFree.lock)
5498 sched.gFree.noStack.pushAll(noStackQ)
5499 sched.gFree.stack.pushAll(stackQ)
5500 unlock(&sched.gFree.lock)
5501 }
5502
5503
5504 func Breakpoint() {
5505 breakpoint()
5506 }
5507
5508
5509
5510
5511
5512
5513 func dolockOSThread() {
5514 if GOARCH == "wasm" {
5515 return
5516 }
5517 gp := getg()
5518 gp.m.lockedg.set(gp)
5519 gp.lockedm.set(gp.m)
5520 }
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
5535
5536
5537
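// LockOSThread wires the calling goroutine to its current operating system thread.
// The calling goroutine will always execute in that thread,
// and no other goroutine will execute in it,
// until the calling goroutine has made as many calls to
// UnlockOSThread as to LockOSThread.
// If the calling goroutine exits without unlocking the thread,
// the thread will be terminated.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.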
5538 func LockOSThread() {
5539 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5540
5541
5542
5543 startTemplateThread()
5544 }
5545 gp := getg()
5546 gp.m.lockedExt++
5547 if gp.m.lockedExt == 0 {
5548 gp.m.lockedExt--
5549 panic("LockOSThread nesting overflow")
5550 }
5551 dolockOSThread()
5552 }
5553
5554
5555 func lockOSThread() {
5556 getg().m.lockedInt++
5557 dolockOSThread()
5558 }
5559
5560
5561
5562
5563
5564
5565 func dounlockOSThread() {
5566 if GOARCH == "wasm" {
5567 return
5568 }
5569 gp := getg()
5570 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5571 return
5572 }
5573 gp.m.lockedg = 0
5574 gp.lockedm = 0
5575 }
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
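// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the
// calling goroutine to zero, it unwires the calling goroutine from
// its fixed operating system thread.
// If there are no active LockOSThread calls, this is a no-op.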
5591 func UnlockOSThread() {
5592 gp := getg()
5593 if gp.m.lockedExt == 0 {
5594 return
5595 }
5596 gp.m.lockedExt--
5597 dounlockOSThread()
5598 }
5599
5600
5601 func unlockOSThread() {
5602 gp := getg()
5603 if gp.m.lockedInt == 0 {
5604 systemstack(badunlockosthread)
5605 }
5606 gp.m.lockedInt--
5607 dounlockOSThread()
5608 }
5609
5610 func badunlockosthread() {
5611 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5612 }
5613
5614 func gcount(includeSys bool) int32 {
5615 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5616 if !includeSys {
5617 n -= sched.ngsys.Load()
5618 }
5619 for _, pp := range allp {
5620 n -= pp.gFree.size
5621 }
5622
5623
5624
5625 if n < 1 {
5626 n = 1
5627 }
5628 return n
5629 }
5630
5631
5632
5633
5634
5635 func goroutineleakcount() int {
5636 return work.goroutineLeak.count
5637 }
5638
5639 func mcount() int32 {
5640 return int32(sched.mnext - sched.nmfreed)
5641 }
5642
5643 var prof struct {
5644 signalLock atomic.Uint32
5645
5646
5647
5648 hz atomic.Int32
5649 }
5650
5651 func _System() { _System() }
5652 func _ExternalCode() { _ExternalCode() }
5653 func _LostExternalCode() { _LostExternalCode() }
5654 func _GC() { _GC() }
5655 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5656 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5657 func _VDSO() { _VDSO() }
5658
5659
5660
5661
5662
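// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.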
5663 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5664 if prof.hz.Load() == 0 {
5665 return
5666 }
5667
5668
5669
5670
5671 if mp != nil && mp.profilehz == 0 {
5672 return
5673 }
5674
5675
5676
5677
5678
5679
5680
5681 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5682 if f := findfunc(pc); f.valid() {
5683 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5684 cpuprof.lostAtomic++
5685 return
5686 }
5687 }
5688 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5689
5690
5691
5692 cpuprof.lostAtomic++
5693 return
5694 }
5695 }
5696
5697
5698
5699
5700
5701
5702
5703 getg().m.mallocing++
5704
5705 var u unwinder
5706 var stk [maxCPUProfStack]uintptr
5707 n := 0
5708 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5709 cgoOff := 0
5710
5711
5712
5713
5714
5715 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5716 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5717 cgoOff++
5718 }
5719 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5720 mp.cgoCallers[0] = 0
5721 }
5722
5723
5724 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5725 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5726
5727
5728 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5729 } else if mp != nil && mp.vdsoSP != 0 {
5730
5731
5732 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5733 } else {
5734 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5735 }
5736 n += tracebackPCs(&u, 0, stk[n:])
5737
5738 if n <= 0 {
5739
5740
5741 n = 2
5742 if inVDSOPage(pc) {
5743 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5744 } else if pc > firstmoduledata.etext {
5745
5746 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5747 }
5748 stk[0] = pc
5749 if mp.preemptoff != "" {
5750 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5751 } else {
5752 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5753 }
5754 }
5755
5756 if prof.hz.Load() != 0 {
5757
5758
5759
5760 var tagPtr *unsafe.Pointer
5761 if gp != nil && gp.m != nil && gp.m.curg != nil {
5762 tagPtr = &gp.m.curg.labels
5763 }
5764 cpuprof.add(tagPtr, stk[:n])
5765
5766 gprof := gp
5767 var mp *m
5768 var pp *p
5769 if gp != nil && gp.m != nil {
5770 if gp.m.curg != nil {
5771 gprof = gp.m.curg
5772 }
5773 mp = gp.m
5774 pp = gp.m.p.ptr()
5775 }
5776 traceCPUSample(gprof, mp, pp, stk[:n])
5777 }
5778 getg().m.mallocing--
5779 }
5780
5781
5782
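// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, setcpuprofilerate turns off CPU profiling.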
5783 func setcpuprofilerate(hz int32) {
5784
5785 if hz < 0 {
5786 hz = 0
5787 }
5788
5789
5790
5791 gp := getg()
5792 gp.m.locks++
5793
5794
5795
5796
5797 setThreadCPUProfiler(0)
5798
5799 for !prof.signalLock.CompareAndSwap(0, 1) {
5800 osyield()
5801 }
5802 if prof.hz.Load() != hz {
5803 setProcessCPUProfiler(hz)
5804 prof.hz.Store(hz)
5805 }
5806 prof.signalLock.Store(0)
5807
5808 lock(&sched.lock)
5809 sched.profilehz = hz
5810 unlock(&sched.lock)
5811
5812 if hz != 0 {
5813 setThreadCPUProfiler(hz)
5814 }
5815
5816 gp.m.locks--
5817 }
5818
5819
5820
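// init initializes pp, which may be a freshly allocated p or a
// previously destroyed p, and transitions it to status _Pgcstop.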
5821 func (pp *p) init(id int32) {
5822 pp.id = id
5823 pp.gcw.id = id
5824 pp.status = _Pgcstop
5825 pp.sudogcache = pp.sudogbuf[:0]
5826 pp.deferpool = pp.deferpoolbuf[:0]
5827 pp.wbBuf.reset()
5828 if pp.mcache == nil {
5829 if id == 0 {
5830 if mcache0 == nil {
5831 throw("missing mcache?")
5832 }
5833
5834
5835 pp.mcache = mcache0
5836 } else {
5837 pp.mcache = allocmcache()
5838 }
5839 }
5840 if raceenabled && pp.raceprocctx == 0 {
5841 if id == 0 {
5842 pp.raceprocctx = raceprocctx0
5843 raceprocctx0 = 0
5844 } else {
5845 pp.raceprocctx = raceproccreate()
5846 }
5847 }
5848 lockInit(&pp.timers.mu, lockRankTimers)
5849
5850
5851
5852 timerpMask.set(id)
5853
5854
5855 idlepMask.clear(id)
5856 }
5857
5858
5859
5860
5861
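// destroy releases all of the resources associated with pp and
// transitions it to status _Pdead.
//
// sched.lock must be held and the world must be stopped.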
5862 func (pp *p) destroy() {
5863 assertLockHeld(&sched.lock)
5864 assertWorldStopped()
5865
5866
5867 for pp.runqhead != pp.runqtail {
5868
5869 pp.runqtail--
5870 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5871
5872 globrunqputhead(gp)
5873 }
5874 if pp.runnext != 0 {
5875 globrunqputhead(pp.runnext.ptr())
5876 pp.runnext = 0
5877 }
5878
5879
5880 getg().m.p.ptr().timers.take(&pp.timers)
5881
5882
5883
5884 if phase := gcphase; phase != _GCoff {
5885 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5886 throw("P destroyed while GC is running")
5887 }
5888
5889 pp.gcw.spanq.destroy()
5890
5891 clear(pp.sudogbuf[:])
5892 pp.sudogcache = pp.sudogbuf[:0]
5893 pp.pinnerCache = nil
5894 clear(pp.deferpoolbuf[:])
5895 pp.deferpool = pp.deferpoolbuf[:0]
5896 systemstack(func() {
5897 for i := 0; i < pp.mspancache.len; i++ {
5898
5899 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5900 }
5901 pp.mspancache.len = 0
5902 lock(&mheap_.lock)
5903 pp.pcache.flush(&mheap_.pages)
5904 unlock(&mheap_.lock)
5905 })
5906 freemcache(pp.mcache)
5907 pp.mcache = nil
5908 gfpurge(pp)
5909 if raceenabled {
5910 if pp.timers.raceCtx != 0 {
5911
5912
5913
5914
5915
5916 mp := getg().m
5917 phold := mp.p.ptr()
5918 mp.p.set(pp)
5919
5920 racectxend(pp.timers.raceCtx)
5921 pp.timers.raceCtx = 0
5922
5923 mp.p.set(phold)
5924 }
5925 raceprocdestroy(pp.raceprocctx)
5926 pp.raceprocctx = 0
5927 }
5928 pp.gcAssistTime = 0
5929 gcCleanups.queued += pp.cleanupsQueued
5930 pp.cleanupsQueued = 0
5931 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5932 pp.goroutinesCreated = 0
5933 pp.xRegs.free()
5934 pp.status = _Pdead
5935 }
5936
5937
5938
5939
5940
5941
5942
5943
5944
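// Change number of processors.
//
// sched.lock must be held, and the world must be stopped.
//
// Returns list of Ps with local work, they need to be scheduled by the caller.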
5945 func procresize(nprocs int32) *p {
5946 assertLockHeld(&sched.lock)
5947 assertWorldStopped()
5948
5949 old := gomaxprocs
5950 if old < 0 || nprocs <= 0 {
5951 throw("procresize: invalid arg")
5952 }
5953 trace := traceAcquire()
5954 if trace.ok() {
5955 trace.Gomaxprocs(nprocs)
5956 traceRelease(trace)
5957 }
5958
5959
5960 now := nanotime()
5961 if sched.procresizetime != 0 {
5962 sched.totaltime += int64(old) * (now - sched.procresizetime)
5963 }
5964 sched.procresizetime = now
5965
5966
5967 if nprocs > int32(len(allp)) {
5968
5969
5970 lock(&allpLock)
5971 if nprocs <= int32(cap(allp)) {
5972 allp = allp[:nprocs]
5973 } else {
5974 nallp := make([]*p, nprocs)
5975
5976
5977 copy(nallp, allp[:cap(allp)])
5978 allp = nallp
5979 }
5980
5981 idlepMask = idlepMask.resize(nprocs)
5982 timerpMask = timerpMask.resize(nprocs)
5983 work.spanqMask = work.spanqMask.resize(nprocs)
5984 unlock(&allpLock)
5985 }
5986
5987
5988 for i := old; i < nprocs; i++ {
5989 pp := allp[i]
5990 if pp == nil {
5991 pp = new(p)
5992 }
5993 pp.init(i)
5994 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5995 }
5996
5997 gp := getg()
5998 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5999
6000 gp.m.p.ptr().status = _Prunning
6001 gp.m.p.ptr().mcache.prepareForSweep()
6002 } else {
6003
6004
6005
6006
6007
6008 if gp.m.p != 0 {
6009 trace := traceAcquire()
6010 if trace.ok() {
6011
6012
6013
6014 trace.GoSched()
6015 trace.ProcStop(gp.m.p.ptr())
6016 traceRelease(trace)
6017 }
6018 gp.m.p.ptr().m = 0
6019 }
6020 gp.m.p = 0
6021 pp := allp[0]
6022 pp.m = 0
6023 pp.status = _Pidle
6024 acquirep(pp)
6025 trace := traceAcquire()
6026 if trace.ok() {
6027 trace.GoStart()
6028 traceRelease(trace)
6029 }
6030 }
6031
6032
6033 mcache0 = nil
6034
6035
6036 for i := nprocs; i < old; i++ {
6037 pp := allp[i]
6038 pp.destroy()
6039
6040 }
6041
6042
6043 if int32(len(allp)) != nprocs {
6044 lock(&allpLock)
6045 allp = allp[:nprocs]
6046 idlepMask = idlepMask.resize(nprocs)
6047 timerpMask = timerpMask.resize(nprocs)
6048 work.spanqMask = work.spanqMask.resize(nprocs)
6049 unlock(&allpLock)
6050 }
6051
6052
6053 var runnablePs *p
6054 var runnablePsNeedM *p
6055 var idlePs *p
6056 for i := nprocs - 1; i >= 0; i-- {
6057 pp := allp[i]
6058 if gp.m.p.ptr() == pp {
6059 continue
6060 }
6061 pp.status = _Pidle
6062 if runqempty(pp) {
6063 pp.link.set(idlePs)
6064 idlePs = pp
6065 continue
6066 }
6067
6068
6069
6070
6071
6072
6073
6074
6075 var mp *m
6076 if oldm := pp.oldm.get(); oldm != nil {
6077
6078 mp = mgetSpecific(oldm)
6079 }
6080 if mp == nil {
6081
6082 pp.link.set(runnablePsNeedM)
6083 runnablePsNeedM = pp
6084 continue
6085 }
6086 pp.m.set(mp)
6087 pp.link.set(runnablePs)
6088 runnablePs = pp
6089 }
6090
6091
6092 for runnablePsNeedM != nil {
6093 pp := runnablePsNeedM
6094 runnablePsNeedM = pp.link.ptr()
6095
6096 mp := mget()
6097 pp.m.set(mp)
6098 pp.link.set(runnablePs)
6099 runnablePs = pp
6100 }
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126 if gcBlackenEnabled != 0 {
6127 for idlePs != nil {
6128 pp := idlePs
6129
6130 ok, _ := gcController.assignWaitingGCWorker(pp, now)
6131 if !ok {
6132
6133 break
6134 }
6135
6136
6137
6138
6139
6140
6141
6142
6143 idlePs = pp.link.ptr()
6144 mp := mget()
6145 pp.m.set(mp)
6146 pp.link.set(runnablePs)
6147 runnablePs = pp
6148 }
6149 }
6150
6151
6152 for idlePs != nil {
6153 pp := idlePs
6154 idlePs = pp.link.ptr()
6155 pidleput(pp, now)
6156 }
6157
6158 stealOrder.reset(uint32(nprocs))
6159 var int32p *int32 = &gomaxprocs
6160 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6161 if old != nprocs {
6162
6163 gcCPULimiter.resetCapacity(now, nprocs)
6164 }
6165 return runnablePs
6166 }
6167
6168
6169
6170
6171
6172
6173
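// Associate p and the current m.
//
// This function is allowed to have write barriers even if the caller
// isn't because it immediately acquires pp.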
6174 func acquirep(pp *p) {
6175
6176 acquirepNoTrace(pp)
6177
6178
6179 trace := traceAcquire()
6180 if trace.ok() {
6181 trace.ProcStart()
6182 traceRelease(trace)
6183 }
6184 }
6185
6186
6187
6188
6189 func acquirepNoTrace(pp *p) {
6190
6191 wirep(pp)
6192
6193
6194
6195
6196
6197
6198 pp.oldm = pp.m.ptr().self
6199
6200
6201
6202 pp.mcache.prepareForSweep()
6203 }
6204
6205
6206
6207
6208
6209
6210
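// wirep is the first step of acquirep, which actually associates the
// current M to pp.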
6211 func wirep(pp *p) {
6212 gp := getg()
6213
6214 if gp.m.p != 0 {
6215
6216
6217 systemstack(func() {
6218 throw("wirep: already in go")
6219 })
6220 }
6221 if pp.m != 0 || pp.status != _Pidle {
6222
6223
6224 systemstack(func() {
6225 id := int64(0)
6226 if pp.m != 0 {
6227 id = pp.m.ptr().id
6228 }
6229 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6230 throw("wirep: invalid p state")
6231 })
6232 }
6233 gp.m.p.set(pp)
6234 pp.m.set(gp.m)
6235 pp.status = _Prunning
6236 }
6237
6238
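// Disassociate p and the current m.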
6239 func releasep() *p {
6240 trace := traceAcquire()
6241 if trace.ok() {
6242 trace.ProcStop(getg().m.p.ptr())
6243 traceRelease(trace)
6244 }
6245 return releasepNoTrace()
6246 }
6247
6248
6249 func releasepNoTrace() *p {
6250 gp := getg()
6251
6252 if gp.m.p == 0 {
6253 throw("releasep: invalid arg")
6254 }
6255 pp := gp.m.p.ptr()
6256 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6257 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6258 throw("releasep: invalid p state")
6259 }
6260
6261
6262 gcController.releaseNextGCMarkWorker(pp)
6263
6264 gp.m.p = 0
6265 pp.m = 0
6266 pp.status = _Pidle
6267 return pp
6268 }
6269
6270 func incidlelocked(v int32) {
6271 lock(&sched.lock)
6272 sched.nmidlelocked += v
6273 if v > 0 {
6274 checkdead()
6275 }
6276 unlock(&sched.lock)
6277 }
6278
6279
6280
6281
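// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.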
6282 func checkdead() {
6283 assertLockHeld(&sched.lock)
6284
6285
6286
6287
6288
6289
6290 if (islibrary || isarchive) && GOARCH != "wasm" {
6291 return
6292 }
6293
6294
6295
6296
6297
6298 if panicking.Load() > 0 {
6299 return
6300 }
6301
6302
6303
6304
6305
6306 var run0 int32
6307 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6308 run0 = 1
6309 }
6310
6311 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6312 if run > run0 {
6313 return
6314 }
6315 if run < 0 {
6316 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6317 unlock(&sched.lock)
6318 throw("checkdead: inconsistent counts")
6319 }
6320
6321 grunning := 0
6322 forEachG(func(gp *g) {
6323 if isSystemGoroutine(gp, false) {
6324 return
6325 }
6326 s := readgstatus(gp)
6327 switch s &^ _Gscan {
6328 case _Gwaiting,
6329 _Gpreempted:
6330 grunning++
6331 case _Grunnable,
6332 _Grunning,
6333 _Gsyscall:
6334 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6335 unlock(&sched.lock)
6336 throw("checkdead: runnable g")
6337 }
6338 })
6339 if grunning == 0 {
6340 unlock(&sched.lock)
6341 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6342 }
6343
6344
6345 if faketime != 0 {
6346 if when := timeSleepUntil(); when < maxWhen {
6347 faketime = when
6348
6349
6350 pp, _ := pidleget(faketime)
6351 if pp == nil {
6352
6353
6354 unlock(&sched.lock)
6355 throw("checkdead: no p for timer")
6356 }
6357 mp := mget()
6358 if mp == nil {
6359
6360
6361 unlock(&sched.lock)
6362 throw("checkdead: no m for timer")
6363 }
6364
6365
6366
6367 sched.nmspinning.Add(1)
6368 mp.spinning = true
6369 mp.nextp.set(pp)
6370 notewakeup(&mp.park)
6371 return
6372 }
6373 }
6374
6375
6376 for _, pp := range allp {
6377 if len(pp.timers.heap) > 0 {
6378 return
6379 }
6380 }
6381
6382 unlock(&sched.lock)
6383 fatal("all goroutines are asleep - deadlock!")
6384 }
6385
6386
6387
6388
6389
6390
6391 var forcegcperiod int64 = 2 * 60 * 1e9
6392
6393
6394
6395
6396 const haveSysmon = GOARCH != "wasm"
6397
6398
6399
6400
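// Always runs without a P, so write barriers are not allowed.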
6401 func sysmon() {
6402 lock(&sched.lock)
6403 sched.nmsys++
6404 checkdead()
6405 unlock(&sched.lock)
6406
6407 lastgomaxprocs := int64(0)
6408 lasttrace := int64(0)
6409 idle := 0
6410 delay := uint32(0)
6411
6412 for {
6413 if idle == 0 {
6414 delay = 20
6415 } else if idle > 50 {
6416 delay *= 2
6417 }
6418 if delay > 10*1000 {
6419 delay = 10 * 1000
6420 }
6421 usleep(delay)
6422
6423
6424
6425
6426
6427
6428
6429
6430
6431
6432
6433
6434
6435
6436
6437
6438 now := nanotime()
6439 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6440 lock(&sched.lock)
6441 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6442 syscallWake := false
6443 next := timeSleepUntil()
6444 if next > now {
6445 sched.sysmonwait.Store(true)
6446 unlock(&sched.lock)
6447
6448
6449 sleep := forcegcperiod / 2
6450 if next-now < sleep {
6451 sleep = next - now
6452 }
6453 shouldRelax := sleep >= osRelaxMinNS
6454 if shouldRelax {
6455 osRelax(true)
6456 }
6457 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6458 if shouldRelax {
6459 osRelax(false)
6460 }
6461 lock(&sched.lock)
6462 sched.sysmonwait.Store(false)
6463 noteclear(&sched.sysmonnote)
6464 }
6465 if syscallWake {
6466 idle = 0
6467 delay = 20
6468 }
6469 }
6470 unlock(&sched.lock)
6471 }
6472
6473 lock(&sched.sysmonlock)
6474
6475
6476 now = nanotime()
6477
6478
6479 if *cgo_yield != nil {
6480 asmcgocall(*cgo_yield, nil)
6481 }
6482
6483 lastpoll := sched.lastpoll.Load()
6484 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6485 sched.lastpoll.CompareAndSwap(lastpoll, now)
6486 list, delta := netpoll(0)
6487 if !list.empty() {
6488
6489
6490
6491
6492
6493
6494
6495 incidlelocked(-1)
6496 injectglist(&list)
6497 incidlelocked(1)
6498 netpollAdjustWaiters(delta)
6499 }
6500 }
6501
6502 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6503 sysmonUpdateGOMAXPROCS()
6504 lastgomaxprocs = now
6505 }
6506 if scavenger.sysmonWake.Load() != 0 {
6507
6508 scavenger.wake()
6509 }
6510
6511
6512 if retake(now) != 0 {
6513 idle = 0
6514 } else {
6515 idle++
6516 }
6517
6518 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6519 lock(&forcegc.lock)
6520 forcegc.idle.Store(false)
6521 var list gList
6522 list.push(forcegc.g)
6523 injectglist(&list)
6524 unlock(&forcegc.lock)
6525 }
6526 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6527 lasttrace = now
6528 schedtrace(debug.scheddetail > 0)
6529 }
6530 unlock(&sched.sysmonlock)
6531 }
6532 }
6533
6534 type sysmontick struct {
6535 schedtick uint32
6536 syscalltick uint32
6537 schedwhen int64
6538 syscallwhen int64
6539 }
6540
6541
6542
6543 const forcePreemptNS = 10 * 1000 * 1000
6544
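// retake preempts Gs that have been running for too long and retakes
// Ps from Ms that have been blocked in a syscall.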
6545 func retake(now int64) uint32 {
6546 n := 0
6547
6548
6549 lock(&allpLock)
6550
6551
6552
6553 for i := 0; i < len(allp); i++ {
6554
6555
6556
6557
6558
6559
6560
6561
6562 pp := allp[i]
6563 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6564
6565
6566 continue
6567 }
6568 pd := &pp.sysmontick
6569 sysretake := false
6570
6571
6572
6573
6574
6575 schedt := int64(pp.schedtick)
6576 if int64(pd.schedtick) != schedt {
6577 pd.schedtick = uint32(schedt)
6578 pd.schedwhen = now
6579 } else if pd.schedwhen+forcePreemptNS <= now {
6580 preemptone(pp)
6581
6582
6583
6584
6585 sysretake = true
6586 }
6587
6588
6589 unlock(&allpLock)
6590
6591
6592
6593
6594
6595
6596
6597
6598 incidlelocked(-1)
6599
6600
6601 thread, ok := setBlockOnExitSyscall(pp)
6602 if !ok {
6603
6604 goto done
6605 }
6606
6607
6608 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6609 pd.syscalltick = uint32(syst)
6610 pd.syscallwhen = now
6611 thread.resume()
6612 goto done
6613 }
6614
6615
6616
6617
6618 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6619 thread.resume()
6620 goto done
6621 }
6622
6623
6624
6625 thread.takeP()
6626 thread.resume()
6627 n++
6628
6629
6630 handoffp(pp)
6631
6632
6633
6634 done:
6635 incidlelocked(1)
6636 lock(&allpLock)
6637 }
6638 unlock(&allpLock)
6639 return uint32(n)
6640 }
6641
6642
6643
6644 type syscallingThread struct {
6645 gp *g
6646 mp *m
6647 pp *p
6648 status uint32
6649 }
6650
6651
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662
6663
6664
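// setBlockOnExitSyscall attempts to take ownership of the goroutine
// currently in a syscall on pp's M by setting the _Gscan bit on its
// status, which prevents that goroutine from completing exitsyscall
// while the caller inspects or takes its P. Reports false if pp is not
// running a goroutine in a syscall. On success, the caller must
// eventually call resume on the returned syscallingThread.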
6665 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6666 if pp.status != _Prunning {
6667 return syscallingThread{}, false
6668 }
6669
6670
6671
6672
6673
6674
6675
6676
6677
6678
6679
6680 mp := pp.m.ptr()
6681 if mp == nil {
6682
6683 return syscallingThread{}, false
6684 }
6685 gp := mp.curg
6686 if gp == nil {
6687
6688 return syscallingThread{}, false
6689 }
6690 status := readgstatus(gp) &^ _Gscan
6691
6692
6693
6694
6695 if status != _Gsyscall && status != _Gdeadextra {
6696
6697 return syscallingThread{}, false
6698 }
6699 if !castogscanstatus(gp, status, status|_Gscan) {
6700
6701 return syscallingThread{}, false
6702 }
6703 if gp.m != mp || gp.m.p.ptr() != pp {
6704
6705 casfrom_Gscanstatus(gp, status|_Gscan, status)
6706 return syscallingThread{}, false
6707 }
6708 return syscallingThread{gp, mp, pp, status}, true
6709 }
6710
6711
6712
6713
6714
6715 func (s syscallingThread) gcstopP() {
6716 assertLockHeld(&sched.lock)
6717
6718 s.releaseP(_Pgcstop)
6719 s.pp.gcStopTime = nanotime()
6720 sched.stopwait--
6721 }
6722
6723
6724
6725 func (s syscallingThread) takeP() {
6726 s.releaseP(_Pidle)
6727 }
6728
6729
6730
6731
6732 func (s syscallingThread) releaseP(state uint32) {
6733 if state != _Pidle && state != _Pgcstop {
6734 throw("attempted to release P into a bad state")
6735 }
6736 trace := traceAcquire()
6737 s.pp.m = 0
6738 s.mp.p = 0
6739 atomic.Store(&s.pp.status, state)
6740 if trace.ok() {
6741 trace.ProcSteal(s.pp)
6742 traceRelease(trace)
6743 }
6744 sched.nGsyscallNoP.Add(1)
6745 s.pp.syscalltick++
6746 }
6747
6748
6749 func (s syscallingThread) resume() {
6750 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6751 }
6752
6753
6754
6755
6756
6757
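// Tell all goroutines that they have been preempted and they should stop.
// This function is purely best-effort. It can fail to inform a goroutine if a
// processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.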
6758 func preemptall() bool {
6759 res := false
6760 for _, pp := range allp {
6761 if pp.status != _Prunning {
6762 continue
6763 }
6764 if preemptone(pp) {
6765 res = true
6766 }
6767 }
6768 return res
6769 }
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
6780
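// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform the
// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request.
// No lock needs to be held.
// Returns true if preemption request was issued.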
6781 func preemptone(pp *p) bool {
6782 mp := pp.m.ptr()
6783 if mp == nil || mp == getg().m {
6784 return false
6785 }
6786 gp := mp.curg
6787 if gp == nil || gp == mp.g0 {
6788 return false
6789 }
6790 if readgstatus(gp)&^_Gscan == _Gsyscall {
6791
6792 return false
6793 }
6794
6795 gp.preempt = true
6796
6797
6798
6799
6800
6801 gp.stackguard0 = stackPreempt
6802
6803
6804 if preemptMSupported && debug.asyncpreemptoff == 0 {
6805 pp.preempt = true
6806 preemptM(mp)
6807 }
6808
6809 return true
6810 }
6811
6812 var starttime int64
6813
6814 func schedtrace(detailed bool) {
6815 now := nanotime()
6816 if starttime == 0 {
6817 starttime = now
6818 }
6819
6820 lock(&sched.lock)
6821 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6822 if detailed {
6823 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6824 }
6825
6826
6827
6828 for i, pp := range allp {
6829 h := atomic.Load(&pp.runqhead)
6830 t := atomic.Load(&pp.runqtail)
6831 if detailed {
6832 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6833 mp := pp.m.ptr()
6834 if mp != nil {
6835 print(mp.id)
6836 } else {
6837 print("nil")
6838 }
6839 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6840 } else {
6841
6842
6843 print(" ")
6844 if i == 0 {
6845 print("[ ")
6846 }
6847 print(t - h)
6848 if i == len(allp)-1 {
6849 print(" ]")
6850 }
6851 }
6852 }
6853
6854 if !detailed {
6855
6856 print(" schedticks=[ ")
6857 for _, pp := range allp {
6858 print(pp.schedtick)
6859 print(" ")
6860 }
6861 print("]\n")
6862 }
6863
6864 if !detailed {
6865 unlock(&sched.lock)
6866 return
6867 }
6868
6869 for mp := allm; mp != nil; mp = mp.alllink {
6870 pp := mp.p.ptr()
6871 print(" M", mp.id, ": p=")
6872 if pp != nil {
6873 print(pp.id)
6874 } else {
6875 print("nil")
6876 }
6877 print(" curg=")
6878 if mp.curg != nil {
6879 print(mp.curg.goid)
6880 } else {
6881 print("nil")
6882 }
6883 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6884 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6885 print(lockedg.goid)
6886 } else {
6887 print("nil")
6888 }
6889 print("\n")
6890 }
6891
6892 forEachG(func(gp *g) {
6893 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6894 if gp.m != nil {
6895 print(gp.m.id)
6896 } else {
6897 print("nil")
6898 }
6899 print(" lockedm=")
6900 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6901 print(lockedm.id)
6902 } else {
6903 print("nil")
6904 }
6905 print("\n")
6906 })
6907 unlock(&sched.lock)
6908 }
6909
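
// updateMaxProcsGState holds the goroutine, wakeup state, and pending
// GOMAXPROCS value used to hand automatic updates from sysmon to a
// goroutine that can stop the world to apply them.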
6910 type updateMaxProcsGState struct {
6911 lock mutex
6912 g *g
6913 idle atomic.Bool

// procs is the GOMAXPROCS value to apply; sysmon sets it before waking g.
6916 procs int32
6917 }
6918
6919 var (
// updatemaxprocs counts non-default use of the updatemaxprocs GODEBUG
// setting (see defaultGOMAXPROCSUpdateEnable).
6922 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}

// updateMaxProcsG is the state shared with updateMaxProcsGoroutine, which
// applies automatic GOMAXPROCS updates on sysmon's behalf.
6926 updateMaxProcsG updateMaxProcsGState

// Automatic GOMAXPROCS updates are split across two actors: sysmon
// periodically recomputes the default value (see sysmonUpdateGOMAXPROCS)
// and, when it changes, wakes updateMaxProcsGoroutine, which stops the
// world to apply it. computeMaxProcsLock serializes recomputation of the
// default value with the decision of whether it needs to be applied.
6975 computeMaxProcsLock mutex
6976 )

// defaultGOMAXPROCSUpdateEnable starts automatic GOMAXPROCS updates,
// unless they have been disabled with GODEBUG=updatemaxprocs=0.
6981 func defaultGOMAXPROCSUpdateEnable() {
6982 if debug.updatemaxprocs == 0 {
// GODEBUG=updatemaxprocs=0 disables automatic GOMAXPROCS updates.
// Record use of this non-default setting and don't start the update
// goroutine.
6994 updatemaxprocs.IncNonDefault()
6995 return
6996 }
6997
6998 go updateMaxProcsGoroutine()
6999 }
7000
7001 func updateMaxProcsGoroutine() {
7002 updateMaxProcsG.g = getg()
7003 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
7004 for {
7005 lock(&updateMaxProcsG.lock)
7006 if updateMaxProcsG.idle.Load() {
7007 throw("updateMaxProcsGoroutine: phase error")
7008 }
7009 updateMaxProcsG.idle.Store(true)
7010 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
7011
7012
7013 stw := stopTheWorldGC(stwGOMAXPROCS)
7014
7015
7016 lock(&sched.lock)
7017 custom := sched.customGOMAXPROCS
7018 unlock(&sched.lock)
7019 if custom {
7020 startTheWorldGC(stw)
7021 return
7022 }

// Apply the pending value. startTheWorldGC picks up newprocs and
// resizes the P set; since this is an automatic update, make sure
// customGOMAXPROCS remains clear.
7028 newprocs = updateMaxProcsG.procs
7029 lock(&sched.lock)
7030 sched.customGOMAXPROCS = false
7031 unlock(&sched.lock)
7032
7033 startTheWorldGC(stw)
7034 }
7035 }
7036
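
// sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS and, if the
// value changed and GOMAXPROCS has not been set explicitly, wakes
// updateMaxProcsGoroutine to apply it.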
7037 func sysmonUpdateGOMAXPROCS() {
7038
7039 lock(&computeMaxProcsLock)
7040
7041
7042 lock(&sched.lock)
7043 custom := sched.customGOMAXPROCS
7044 curr := gomaxprocs
7045 unlock(&sched.lock)
7046 if custom {
7047 unlock(&computeMaxProcsLock)
7048 return
7049 }
7050
7051
7052 procs := defaultGOMAXPROCS(0)
7053 unlock(&computeMaxProcsLock)
7054 if procs == curr {
7055
7056 return
7057 }

// sysmon cannot stop the world itself, so hand the new value to the
// update goroutine and wake it, but only if it is idle; if it is not,
// a previous update is still being applied.
7062 if updateMaxProcsG.idle.Load() {
7063 lock(&updateMaxProcsG.lock)
7064 updateMaxProcsG.procs = procs
7065 updateMaxProcsG.idle.Store(false)
7066 var list gList
7067 list.push(updateMaxProcsG.g)
7068 injectglist(&list)
7069 unlock(&updateMaxProcsG.lock)
7070 }
7071 }

// schedEnableUser enables or disables scheduling of user goroutines.
// It does not stop already running user goroutines, so when disabling
// them the caller should first stop the world.
7078 func schedEnableUser(enable bool) {
7079 lock(&sched.lock)
7080 if sched.disable.user == !enable {
7081 unlock(&sched.lock)
7082 return
7083 }
7084 sched.disable.user = !enable
7085 if enable {
7086 n := sched.disable.runnable.size
7087 globrunqputbatch(&sched.disable.runnable)
7088 unlock(&sched.lock)
7089 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7090 startm(nil, false, false)
7091 }
7092 } else {
7093 unlock(&sched.lock)
7094 }
7095 }

// schedEnabled reports whether gp should be scheduled. It returns false
// only while scheduling of user goroutines is disabled and gp is not a
// system goroutine. sched.lock must be held.
7101 func schedEnabled(gp *g) bool {
7102 assertLockHeld(&sched.lock)
7103
7104 if sched.disable.user {
7105 return isSystemGoroutine(gp, true)
7106 }
7107 return true
7108 }

// mput puts mp on the idle M list and checks for deadlock.
// sched.lock must be held.
7115 func mput(mp *m) {
7116 assertLockHeld(&sched.lock)
7117
7118 sched.midle.push(unsafe.Pointer(mp))
7119 sched.nmidle++
7120 checkdead()
7121 }

// mget tries to take an M off the idle M list, returning nil if none are
// idle. sched.lock must be held.
7128 func mget() *m {
7129 assertLockHeld(&sched.lock)
7130
7131 mp := (*m)(sched.midle.pop())
7132 if mp != nil {
7133 sched.nmidle--
7134 }
7135 return mp
7136 }

// mgetSpecific removes mp from the idle M list if it is there, returning
// mp on success and nil if mp was not idle. sched.lock must be held.
7145 func mgetSpecific(mp *m) *m {
7146 assertLockHeld(&sched.lock)
7147
7148 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7149
7150 return nil
7151 }
7152
7153 sched.midle.remove(unsafe.Pointer(mp))
7154 sched.nmidle--
7155
7156 return mp
7157 }

// globrunqput puts gp on the tail of the global runnable queue.
// sched.lock must be held.
7164 func globrunqput(gp *g) {
7165 assertLockHeld(&sched.lock)
7166
7167 sched.runq.pushBack(gp)
7168 }

// globrunqputhead puts gp at the head of the global runnable queue.
// sched.lock must be held.
7175 func globrunqputhead(gp *g) {
7176 assertLockHeld(&sched.lock)
7177
7178 sched.runq.push(gp)
7179 }

// globrunqputbatch appends an entire batch of goroutines to the global
// runnable queue and clears *batch. sched.lock must be held.
7187 func globrunqputbatch(batch *gQueue) {
7188 assertLockHeld(&sched.lock)
7189
7190 sched.runq.pushBackAll(*batch)
7191 *batch = gQueue{}
7192 }

// globrunqget pops one goroutine off the head of the global runnable
// queue, or returns nil if it is empty. sched.lock must be held.
7196 func globrunqget() *g {
7197 assertLockHeld(&sched.lock)
7198
7199 if sched.runq.size == 0 {
7200 return nil
7201 }
7202
7203 return sched.runq.pop()
7204 }

// globrunqgetbatch dequeues gp plus up to n-1 additional goroutines into q,
// with n capped at the queue size and at a proportional share per P.
// sched.lock must be held.
7208 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7209 assertLockHeld(&sched.lock)
7210
7211 if sched.runq.size == 0 {
7212 return
7213 }
7214
7215 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7216
7217 gp = sched.runq.pop()
7218 n--
7219
7220 for ; n > 0; n-- {
7221 gp1 := sched.runq.pop()
7222 q.pushBack(gp1)
7223 }
7224 return
7225 }

// pMask is an atomic bitmask with one bit per P, indexed by P ID.
7228 type pMask []uint32

// read reports whether the bit for P id is set.
7231 func (p pMask) read(id uint32) bool {
7232 word := id / 32
7233 mask := uint32(1) << (id % 32)
7234 return (atomic.Load(&p[word]) & mask) != 0
7235 }

// set atomically sets the bit for P id.
7238 func (p pMask) set(id int32) {
7239 word := id / 32
7240 mask := uint32(1) << (id % 32)
7241 atomic.Or(&p[word], mask)
7242 }

// clear atomically clears the bit for P id.
7245 func (p pMask) clear(id int32) {
7246 word := id / 32
7247 mask := uint32(1) << (id % 32)
7248 atomic.And(&p[word], ^mask)
7249 }

// any reports whether any bit in the mask is set.
7252 func (p pMask) any() bool {
7253 for i := range p {
7254 if atomic.Load(&p[i]) != 0 {
7255 return true
7256 }
7257 }
7258 return false
7259 }

// resize returns a pMask that can hold nprocs Ps, reusing the existing
// backing store when it is large enough and otherwise copying into a new,
// larger one.
7265 func (p pMask) resize(nprocs int32) pMask {
7266 maskWords := (nprocs + 31) / 32
7267
7268 if maskWords <= int32(cap(p)) {
7269 return p[:maskWords]
7270 }
7271 newMask := make([]uint32, maskWords)
7272
7273 copy(newMask, p)
7274 return newMask
7275 }
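
// For example (illustration only): P 37 maps to word 37/32 = 1 and bit
// 37%32 = 5, so p.set(37) atomically ORs 1<<5 into p[1].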

// pidleput puts pp on the _Pidle list. now must be a relatively recent call
// to nanotime() or zero; it returns the passed time, or the current time if
// zero was passed. pp's timer and idle masks are updated so idle Ps can be
// skipped by stealers, and an idle limiter event is started.
// sched.lock must be held.
7288 func pidleput(pp *p, now int64) int64 {
7289 assertLockHeld(&sched.lock)
7290
7291 if !runqempty(pp) {
7292 throw("pidleput: P has non-empty run queue")
7293 }
7294 if now == 0 {
7295 now = nanotime()
7296 }
7297 if pp.timers.len.Load() == 0 {
7298 timerpMask.clear(pp.id)
7299 }
7300 idlepMask.set(pp.id)
7301 pp.link = sched.pidle
7302 sched.pidle.set(pp)
7303 sched.npidle.Add(1)
7304 if !pp.limiterEvent.start(limiterEventIdle, now) {
7305 throw("must be able to track idle limiter event")
7306 }
7307 return now
7308 }

// pidleget tries to take a P off the _Pidle list, acquiring ownership of it
// and updating the timer and idle masks. now must be a relatively recent
// call to nanotime() or zero; it returns the passed time, or the current
// time if zero was passed and a P was found. sched.lock must be held.
7317 func pidleget(now int64) (*p, int64) {
7318 assertLockHeld(&sched.lock)
7319
7320 pp := sched.pidle.ptr()
7321 if pp != nil {
7322
7323 if now == 0 {
7324 now = nanotime()
7325 }
7326 timerpMask.set(pp.id)
7327 idlepMask.clear(pp.id)
7328 sched.pidle = pp.link
7329 sched.npidle.Add(-1)
7330 pp.limiterEvent.stop(limiterEventIdle, now)
7331 }
7332 return pp, now
7333 }

// pidlegetSpinning is pidleget for use by an M that has found work and is
// (or is about to become) spinning. If no idle P is available it sets
// sched.needspinning so that an M about to release its P re-checks for the
// newly found work.
7345 func pidlegetSpinning(now int64) (*p, int64) {
7346 assertLockHeld(&sched.lock)
7347
7348 pp, now := pidleget(now)
7349 if pp == nil {
7350
7351
7352
7353 sched.needspinning.Store(1)
7354 return nil, now
7355 }
7356
7357 return pp, now
7358 }

// runqempty reports whether pp has no goroutines on its local run queue
// (including runnext). It never returns true spuriously.
7362 func runqempty(pp *p) bool {
// Simply checking runqhead == runqtail and runnext == 0 can race with
// runqput moving a goroutine between runnext and the queue. Load head,
// tail, and runnext, then confirm the tail has not changed; this never
// reports empty spuriously.
7367 for {
7368 head := atomic.Load(&pp.runqhead)
7369 tail := atomic.Load(&pp.runqtail)
7370 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7371 if tail == atomic.Load(&pp.runqtail) {
7372 return head == tail && runnext == 0
7373 }
7374 }
7375 }

// randomizeScheduler perturbs scheduling order when the race detector is
// enabled, to shake out code that relies on a particular (unspecified)
// scheduling order.
7386 const randomizeScheduler = raceenabled

// runqput tries to put gp on pp's local runnable queue.
// If next is false, it adds gp to the tail of the queue.
// If next is true, it puts gp in the pp.runnext slot; any goroutine it
// displaces from runnext is moved to the queue instead.
// If the local queue is full, gp (with half the queue) is put on the
// global queue. Executed only by the owner P.
7393 func runqput(pp *p, gp *g, next bool) {
7394 if !haveSysmon && next {
// A goroutine placed in runnext inherits the current goroutine's time
// slice (see runqget). Without sysmon running there is nothing to
// preempt a pair of goroutines endlessly trading that slice via
// runnext, which could starve everything else, so fall back to the
// ordinary queue.
7403 next = false
7404 }
7405 if randomizeScheduler && next && randn(2) == 0 {
7406 next = false
7407 }
7408
7409 if next {
7410 retryNext:
7411 oldnext := pp.runnext
7412 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7413 goto retryNext
7414 }
7415 if oldnext == 0 {
7416 return
7417 }
// Kick the old runnext out to the regular run queue.
7419 gp = oldnext.ptr()
7420 }
7421
7422 retry:
7423 h := atomic.LoadAcq(&pp.runqhead)
7424 t := pp.runqtail
7425 if t-h < uint32(len(pp.runq)) {
7426 pp.runq[t%uint32(len(pp.runq))].set(gp)
7427 atomic.StoreRel(&pp.runqtail, t+1)
7428 return
7429 }
7430 if runqputslow(pp, gp, h, t) {
7431 return
7432 }
7433
7434 goto retry
7435 }
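
// pp.runq is a fixed-size ring buffer: runqhead and runqtail only ever
// increase (wrapping as uint32), entry i lives at index i%len(pp.runq),
// and t-h is the number of queued goroutines.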

// runqputslow puts gp and a batch of half of pp's local run queue on the
// global queue, because the local queue is full.
// Executed only by the owner P.
7439 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7440 var batch [len(pp.runq)/2 + 1]*g
7441
7442
7443 n := t - h
7444 n = n / 2
7445 if n != uint32(len(pp.runq)/2) {
7446 throw("runqputslow: queue is not full")
7447 }
7448 for i := uint32(0); i < n; i++ {
7449 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7450 }
7451 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7452 return false
7453 }
7454 batch[n] = gp
7455
7456 if randomizeScheduler {
7457 for i := uint32(1); i <= n; i++ {
7458 j := cheaprandn(i + 1)
7459 batch[i], batch[j] = batch[j], batch[i]
7460 }
7461 }
7462
7463
7464 for i := uint32(0); i < n; i++ {
7465 batch[i].schedlink.set(batch[i+1])
7466 }
7467
7468 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7469
7470
7471 lock(&sched.lock)
7472 globrunqputbatch(&q)
7473 unlock(&sched.lock)
7474 return true
7475 }

// runqputbatch tries to put all the goroutines in q on pp's local runnable
// queue. Goroutines that do not fit are left in q.
// Executed only by the owner P.
7480 func runqputbatch(pp *p, q *gQueue) {
7481 if q.empty() {
7482 return
7483 }
7484 h := atomic.LoadAcq(&pp.runqhead)
7485 t := pp.runqtail
7486 n := uint32(0)
7487 for !q.empty() && t-h < uint32(len(pp.runq)) {
7488 gp := q.pop()
7489 pp.runq[t%uint32(len(pp.runq))].set(gp)
7490 t++
7491 n++
7492 }
7493
7494 if randomizeScheduler {
7495 off := func(o uint32) uint32 {
7496 return (pp.runqtail + o) % uint32(len(pp.runq))
7497 }
7498 for i := uint32(1); i < n; i++ {
7499 j := cheaprandn(i + 1)
7500 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7501 }
7502 }
7503
7504 atomic.StoreRel(&pp.runqtail, t)
7505
7506 return
7507 }

// runqget gets a goroutine from pp's local runnable queue.
// If inheritTime is true, the goroutine should inherit the remaining time
// in the current time slice; otherwise it starts a new time slice.
// Executed only by the owner P.
7513 func runqget(pp *p) (gp *g, inheritTime bool) {
7514
7515 next := pp.runnext
// If the CAS fails, runnext can only have been stolen by another P,
// because only the owner P ever installs a goroutine in its own
// runnext, so there is no need to retry.
7519 if next != 0 && pp.runnext.cas(next, 0) {
7520 return next.ptr(), true
7521 }
7522
7523 for {
7524 h := atomic.LoadAcq(&pp.runqhead)
7525 t := pp.runqtail
7526 if t == h {
7527 return nil, false
7528 }
7529 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7530 if atomic.CasRel(&pp.runqhead, h, h+1) {
7531 return gp, false
7532 }
7533 }
7534 }

// runqdrain drains pp's local runnable queue (including runnext) and
// returns all of its goroutines.
// Executed only by the owner P.
7538 func runqdrain(pp *p) (drainQ gQueue) {
7539 oldNext := pp.runnext
7540 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7541 drainQ.pushBack(oldNext.ptr())
7542 }
7543
7544 retry:
7545 h := atomic.LoadAcq(&pp.runqhead)
7546 t := pp.runqtail
7547 qn := t - h
7548 if qn == 0 {
7549 return
7550 }
7551 if qn > uint32(len(pp.runq)) {
7552 goto retry
7553 }
7554
7555 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7556 goto retry
7557 }

// The CAS above claimed entries [h, h+qn). Now that runqhead has
// advanced, stealers can no longer take them, so they can be copied
// out without further synchronization.
7566 for i := uint32(0); i < qn; i++ {
7567 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7568 drainQ.pushBack(gp)
7569 }
7570 return
7571 }

// runqgrab grabs a batch of goroutines from pp's local runnable queue into
// batch, a ring buffer starting at batchHead, and returns the number of
// goroutines grabbed. It can be executed by any P.
7577 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7578 for {
7579 h := atomic.LoadAcq(&pp.runqhead)
7580 t := atomic.LoadAcq(&pp.runqtail)
7581 n := t - h
7582 n = n - n/2
7583 if n == 0 {
7584 if stealRunNextG {
// The queue is empty; try to steal pp.runnext instead.
7586 if next := pp.runnext; next != 0 {
7587 if pp.status == _Prunning {
7588 if mp := pp.m.ptr(); mp != nil {
7589 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
// pp is running and its current goroutine is not sitting in a
// syscall, so pp may be just about to run the goroutine in runnext:
// the common pattern is a goroutine that readies another and then
// almost immediately blocks. Rather than stealing runnext in that
// window (and bouncing goroutines between Ps), back off briefly to
// give pp a chance to schedule runnext itself.
7609 if !osHasLowResTimer {
7610 usleep(3)
7611 } else {
// On platforms with coarse-grained OS timers a short sleep
// would oversleep badly; just yield the thread instead.
7615 osyield()
7616 }
7617 }
7618 }
7619 }
7620 if !pp.runnext.cas(next, 0) {
7621 continue
7622 }
7623 batch[batchHead%uint32(len(batch))] = next
7624 return 1
7625 }
7626 }
7627 return 0
7628 }
7629 if n > uint32(len(pp.runq)/2) {
7630 continue
7631 }
7632 for i := uint32(0); i < n; i++ {
7633 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7634 batch[(batchHead+i)%uint32(len(batch))] = g
7635 }
7636 if atomic.CasRel(&pp.runqhead, h, h+n) {
7637 return n
7638 }
7639 }
7640 }

// runqsteal steals half of the goroutines from p2's local runnable queue
// and puts them onto pp's local runnable queue, returning one of the
// stolen goroutines (or nil if it failed).
7645 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7646 t := pp.runqtail
7647 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7648 if n == 0 {
7649 return nil
7650 }
7651 n--
7652 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7653 if n == 0 {
7654 return gp
7655 }
7656 h := atomic.LoadAcq(&pp.runqhead)
7657 if t-h+n >= uint32(len(pp.runq)) {
7658 throw("runqsteal: runq overflow")
7659 }
7660 atomic.StoreRel(&pp.runqtail, t+n)
7661 return gp
7662 }

// A gQueue is a queue of Gs linked through g.schedlink. A G can be on only
// one gQueue or gList at a time.
7666 type gQueue struct {
7667 head guintptr
7668 tail guintptr
7669 size int32
7670 }
7671
7672
7673 func (q *gQueue) empty() bool {
7674 return q.head == 0
7675 }
7676
7677
7678 func (q *gQueue) push(gp *g) {
7679 gp.schedlink = q.head
7680 q.head.set(gp)
7681 if q.tail == 0 {
7682 q.tail.set(gp)
7683 }
7684 q.size++
7685 }
7686
7687
7688 func (q *gQueue) pushBack(gp *g) {
7689 gp.schedlink = 0
7690 if q.tail != 0 {
7691 q.tail.ptr().schedlink.set(gp)
7692 } else {
7693 q.head.set(gp)
7694 }
7695 q.tail.set(gp)
7696 q.size++
7697 }
7698
7699
7700
7701 func (q *gQueue) pushBackAll(q2 gQueue) {
7702 if q2.tail == 0 {
7703 return
7704 }
7705 q2.tail.ptr().schedlink = 0
7706 if q.tail != 0 {
7707 q.tail.ptr().schedlink = q2.head
7708 } else {
7709 q.head = q2.head
7710 }
7711 q.tail = q2.tail
7712 q.size += q2.size
7713 }
7714
7715
7716
7717 func (q *gQueue) pop() *g {
7718 gp := q.head.ptr()
7719 if gp != nil {
7720 q.head = gp.schedlink
7721 if q.head == 0 {
7722 q.tail = 0
7723 }
7724 q.size--
7725 }
7726 return gp
7727 }
7728
7729
7730 func (q *gQueue) popList() gList {
7731 stack := gList{q.head, q.size}
7732 *q = gQueue{}
7733 return stack
7734 }

// A gList is a list of Gs linked through g.schedlink. A G can be on only
// one gQueue or gList at a time.
7738 type gList struct {
7739 head guintptr
7740 size int32
7741 }
7742
7743
7744 func (l *gList) empty() bool {
7745 return l.head == 0
7746 }
7747
7748
7749 func (l *gList) push(gp *g) {
7750 gp.schedlink = l.head
7751 l.head.set(gp)
7752 l.size++
7753 }
7754
7755
7756 func (l *gList) pushAll(q gQueue) {
7757 if !q.empty() {
7758 q.tail.ptr().schedlink = l.head
7759 l.head = q.head
7760 l.size += q.size
7761 }
7762 }
7763
7764
7765 func (l *gList) pop() *g {
7766 gp := l.head.ptr()
7767 if gp != nil {
7768 l.head = gp.schedlink
7769 l.size--
7770 }
7771 return gp
7772 }

// setMaxThreads implements runtime/debug.SetMaxThreads, returning the
// previous limit.
7775 func setMaxThreads(in int) (out int) {
7776 lock(&sched.lock)
7777 out = int(sched.maxmcount)
7778 if in > 0x7fffffff {
7779 sched.maxmcount = 0x7fffffff
7780 } else {
7781 sched.maxmcount = int32(in)
7782 }
7783 checkmcount()
7784 unlock(&sched.lock)
7785 return
7786 }

// procPin disables preemption for the current goroutine (by incrementing
// m.locks), effectively pinning it to its current P, and returns that P's
// ID. It is reached from sync and sync/atomic through the wrappers below.
7800 func procPin() int {
7801 gp := getg()
7802 mp := gp.m
7803
7804 mp.locks++
7805 return int(mp.p.ptr().id)
7806 }

// procUnpin undoes procPin, re-enabling preemption of the current
// goroutine.
7820 func procUnpin() {
7821 gp := getg()
7822 gp.m.locks--
7823 }
7824
7825
7826
7827 func sync_runtime_procPin() int {
7828 return procPin()
7829 }
7830
7831
7832
7833 func sync_runtime_procUnpin() {
7834 procUnpin()
7835 }
7836
7837
7838
7839 func sync_atomic_runtime_procPin() int {
7840 return procPin()
7841 }
7842
7843
7844
7845 func sync_atomic_runtime_procUnpin() {
7846 procUnpin()
7847 }

// internal_sync_runtime_canSpin reports whether it is worthwhile for a
// mutex in internal/sync to keep actively spinning on attempt i.
7853 func internal_sync_runtime_canSpin(i int) bool {
// Spin only a few times and only on multicore machines with at least
// one other running P and an empty local run queue. Unlike runtime
// mutexes, no passive spinning is done here, since there may be
// runnable work on the global queue or on other Ps.
7859 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7860 return false
7861 }
7862 if p := getg().m.p.ptr(); !runqempty(p) {
7863 return false
7864 }
7865 return true
7866 }

// internal_sync_runtime_doSpin performs one short round of active spinning.
7870 func internal_sync_runtime_doSpin() {
7871 procyield(active_spin_cnt)
7872 }

// sync_runtime_canSpin forwards to internal_sync_runtime_canSpin. It is
// kept because external packages reach it via linkname, so its name and
// signature must not change.
7888 func sync_runtime_canSpin(i int) bool {
7889 return internal_sync_runtime_canSpin(i)
7890 }

// sync_runtime_doSpin forwards to internal_sync_runtime_doSpin. Like
// sync_runtime_canSpin, it is reached via linkname from outside the
// runtime, so its name and signature must not change.
7904 func sync_runtime_doSpin() {
7905 internal_sync_runtime_doSpin()
7906 }

// stealOrder holds the randomized order in which Ps are examined when
// stealing work.
7908 var stealOrder randomOrder

// randomOrder/randomEnum are helper types for randomized work stealing.
// They allow enumerating all Ps in different pseudo-random orders without
// repetition: if inc is coprime with count, then pos = (pos + inc) % count
// visits every value in [0, count) exactly once.
7914 type randomOrder struct {
7915 count uint32
7916 coprimes []uint32
7917 }
7918
7919 type randomEnum struct {
7920 i uint32
7921 count uint32
7922 pos uint32
7923 inc uint32
7924 }
7925
7926 func (ord *randomOrder) reset(count uint32) {
7927 ord.count = count
7928 ord.coprimes = ord.coprimes[:0]
7929 for i := uint32(1); i <= count; i++ {
7930 if gcd(i, count) == 1 {
7931 ord.coprimes = append(ord.coprimes, i)
7932 }
7933 }
7934 }
7935
7936 func (ord *randomOrder) start(i uint32) randomEnum {
7937 return randomEnum{
7938 count: ord.count,
7939 pos: i % ord.count,
7940 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7941 }
7942 }
7943
7944 func (enum *randomEnum) done() bool {
7945 return enum.i == enum.count
7946 }
7947
7948 func (enum *randomEnum) next() {
7949 enum.i++
7950 enum.pos = (enum.pos + enum.inc) % enum.count
7951 }
7952
7953 func (enum *randomEnum) position() uint32 {
7954 return enum.pos
7955 }
7956
7957 func gcd(a, b uint32) uint32 {
7958 for b != 0 {
7959 a, b = b, a%b
7960 }
7961 return a
7962 }
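
// Illustration: with count = 6 the coprimes are {1, 5}. An enumeration
// starting at pos 2 with inc 5 visits 2, 1, 0, 5, 4, 3, covering each P
// exactly once.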

// An initTask represents the set of init functions that need to be run
// for a package.
7966 type initTask struct {
7967 state uint32
7968 nfns uint32
// nfns function pointers, each goarch.PtrSize bytes, follow in memory;
// doInit1 indexes them starting at offset 8.
7970 }

// inittrace holds statistics for init functions; they are updated by
// malloc and newproc while active is set.
7974 var inittrace tracestat
7975
7976 type tracestat struct {
7977 active bool
7978 id uint64
7979 allocs uint64
7980 bytes uint64
7981 }
7982
7983 func doInit(ts []*initTask) {
7984 for _, t := range ts {
7985 doInit1(t)
7986 }
7987 }
7988
7989 func doInit1(t *initTask) {
7990 switch t.state {
7991 case 2:
7992 return
7993 case 1:
7994 throw("recursive call during initialization - linker skew")
7995 default:
7996 t.state = 1
7997
7998 var (
7999 start int64
8000 before tracestat
8001 )
8002
8003 if inittrace.active {
8004 start = nanotime()
8005
8006 before = inittrace
8007 }
8008
8009 if t.nfns == 0 {
8010
8011 throw("inittask with no functions")
8012 }
8013
8014 firstFunc := add(unsafe.Pointer(t), 8)
8015 for i := uint32(0); i < t.nfns; i++ {
8016 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
8017 f := *(*func())(unsafe.Pointer(&p))
8018 f()
8019 }
8020
8021 if inittrace.active {
8022 end := nanotime()
8023
8024 after := inittrace
8025
8026 f := *(*func())(unsafe.Pointer(&firstFunc))
8027 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
8028
8029 var sbuf [24]byte
8030 print("init ", pkg, " @")
8031 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
8032 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
8033 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
8034 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
8035 print("\n")
8036 }
8037
8038 t.state = 2
8039 }
8040 }
8041