Source file
src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/strconv"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
21 var modinfo string
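
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall without an associated P.
//
// Design doc at https://golang.org/s/go11sched.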
117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
127 var runtime_inittasks []*initTask
128
129
130
131
132
133 var main_init_done chan bool
134
135
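// main_main is main.main, the application's main function. The body lives in
// the main package; the runtime declaration is wired up to it by the linker
// (via a go:linkname directive in the original source).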
136 func main_main()
137
138
139 var mainStarted bool
140
141
142 var runtimeInitTime int64
143
144
145 var initSigmask sigset
146
147
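// The main goroutine.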
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
154
155
156
157
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
164
165
166
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
178
179
180
181
182
183
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247
248
249
250
251
252
253
254
255 last := lastmoduledatap
256 for m := &firstmoduledata; true; m = m.next {
257 doInit(m.inittasks)
258 if m == last {
259 break
260 }
261 }
262
263
264
265 inittrace.active = false
266
267 close(main_init_done)
268
269 needUnlock = false
270 unlockOSThread()
271
272 if isarchive || islibrary {
273
274
275 if GOARCH == "wasm" {
276
277
278
279
280
281
282
283 pause(sys.GetCallerSP() - 16)
284 panic("unreachable")
285 }
286 return
287 }
288 fn := main_main
289 fn()
290
291
292
293
294
295
296
297
298 exitHooksRun := false
299 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
300 runExitHooks(0)
301 exitHooksRun = true
302 lsandoleakcheck()
303 }
304
305
306
307
308
309 if runningPanicDefers.Load() != 0 {
310
311 for c := 0; c < 1000; c++ {
312 if runningPanicDefers.Load() == 0 {
313 break
314 }
315 Gosched()
316 }
317 }
318 if panicking.Load() != 0 {
319 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
320 }
321 if !exitHooksRun {
322 runExitHooks(0)
323 }
324 if raceenabled {
325 racefini()
326 }
327
328 exit(0)
329 for {
330 var x *int32
331 *x = 0
332 }
333 }
334
335
336
337
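// os_beforeExit is called from os.Exit(0). It runs any registered exit hooks
// and, on a clean exit, finalizes race-detector and leak-checker state.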
338 func os_beforeExit(exitCode int) {
339 runExitHooks(exitCode)
340 if exitCode == 0 && raceenabled {
341 racefini()
342 }
343
344
345 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
346 lsandoleakcheck()
347 }
348 }
349
350 func init() {
351 exithook.Gosched = Gosched
352 exithook.Goid = func() uint64 { return getg().goid }
353 exithook.Throw = throw
354 }
355
356 func runExitHooks(code int) {
357 exithook.Run(code)
358 }
359
360
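// start forcegc helper goroutine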
361 func init() {
362 go forcegchelper()
363 }
364
365 func forcegchelper() {
366 forcegc.g = getg()
367 lockInit(&forcegc.lock, lockRankForcegc)
368 for {
369 lock(&forcegc.lock)
370 if forcegc.idle.Load() {
371 throw("forcegc: phase error")
372 }
373 forcegc.idle.Store(true)
374 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
375
376 if debug.gctrace > 0 {
377 println("GC forced")
378 }
379
380 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
381 }
382 }
383
384
385
386
387
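// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.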
388 func Gosched() {
389 checkTimeouts()
390 mcall(gosched_m)
391 }
392
393
394
395
396
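// goschedguarded yields the processor like Gosched, but also checks
// for forbidden states and opts out of the yield in those cases.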
397 func goschedguarded() {
398 mcall(goschedguarded_m)
399 }
400
401
402
403
404
405
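// goschedIfBusy yields the processor like Gosched, but skips the yield if
// there are idle Ps, unless the current goroutine has a pending preemption
// request.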
406 func goschedIfBusy() {
407 gp := getg()
408
409
410 if !gp.preempt && sched.npidle.Load() > 0 {
411 return
412 }
413 mcall(gosched_m)
414 }
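
// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between the call
// to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called,
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.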
444 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
445 if reason != waitReasonSleep {
446 checkTimeouts()
447 }
448 mp := acquirem()
449 gp := mp.curg
450 status := readgstatus(gp)
451 if status != _Grunning && status != _Gscanrunning {
452 throw("gopark: bad g status")
453 }
454 mp.waitlock = lock
455 mp.waitunlockf = unlockf
456 gp.waitreason = reason
457 mp.waitTraceBlockReason = traceReason
458 mp.waitTraceSkip = traceskip
459 releasem(mp)
460
461 mcall(park_m)
462 }
463
464
465
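// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).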
466 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
467 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
468 }
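
// goready marks gp ready to run. It switches to the system stack and puts gp
// on the run queue of the current P (in the next-run slot), waking another P
// if necessary.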
480 func goready(gp *g, traceskip int) {
481 systemstack(func() {
482 ready(gp, traceskip, true)
483 })
484 }
485
486
487 func acquireSudog() *sudog {
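// Delicate dance: the semaphore implementation calls acquireSudog,
// acquireSudog calls new(sudog), new calls malloc, malloc can call the
// garbage collector, and the garbage collector calls the semaphore
// implementation in stopTheWorld.
// Break the cycle by doing acquirem/releasem around new(sudog) below.
// The acquirem/releasem increments m.locks during new(sudog),
// which keeps the garbage collector from being invoked.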
496 mp := acquirem()
497 pp := mp.p.ptr()
498 if len(pp.sudogcache) == 0 {
499 lock(&sched.sudoglock)
500
501 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
502 s := sched.sudogcache
503 sched.sudogcache = s.next
504 s.next = nil
505 pp.sudogcache = append(pp.sudogcache, s)
506 }
507 unlock(&sched.sudoglock)
508
509 if len(pp.sudogcache) == 0 {
510 pp.sudogcache = append(pp.sudogcache, new(sudog))
511 }
512 }
513 n := len(pp.sudogcache)
514 s := pp.sudogcache[n-1]
515 pp.sudogcache[n-1] = nil
516 pp.sudogcache = pp.sudogcache[:n-1]
517 if s.elem.get() != nil {
518 throw("acquireSudog: found s.elem != nil in cache")
519 }
520 releasem(mp)
521 return s
522 }
523
524
525 func releaseSudog(s *sudog) {
526 if s.elem.get() != nil {
527 throw("runtime: sudog with non-nil elem")
528 }
529 if s.isSelect {
530 throw("runtime: sudog with non-false isSelect")
531 }
532 if s.next != nil {
533 throw("runtime: sudog with non-nil next")
534 }
535 if s.prev != nil {
536 throw("runtime: sudog with non-nil prev")
537 }
538 if s.waitlink != nil {
539 throw("runtime: sudog with non-nil waitlink")
540 }
541 if s.c.get() != nil {
542 throw("runtime: sudog with non-nil c")
543 }
544 gp := getg()
545 if gp.param != nil {
546 throw("runtime: releaseSudog with non-nil gp.param")
547 }
548 mp := acquirem()
549 pp := mp.p.ptr()
550 if len(pp.sudogcache) == cap(pp.sudogcache) {
551
552 var first, last *sudog
553 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
554 n := len(pp.sudogcache)
555 p := pp.sudogcache[n-1]
556 pp.sudogcache[n-1] = nil
557 pp.sudogcache = pp.sudogcache[:n-1]
558 if first == nil {
559 first = p
560 } else {
561 last.next = p
562 }
563 last = p
564 }
565 lock(&sched.sudoglock)
566 last.next = sched.sudogcache
567 sched.sudogcache = first
568 unlock(&sched.sudoglock)
569 }
570 pp.sudogcache = append(pp.sudogcache, s)
571 releasem(mp)
572 }
573
574
575 func badmcall(fn func(*g)) {
576 throw("runtime: mcall called on m->g0 stack")
577 }
578
579 func badmcall2(fn func(*g)) {
580 throw("runtime: mcall function returned")
581 }
582
583 func badreflectcall() {
584 panic(plainError("arg size to reflect.call more than 1GB"))
585 }
586
587
588
589 func badmorestackg0() {
590 if !crashStackImplemented {
591 writeErrStr("fatal: morestack on g0\n")
592 return
593 }
594
595 g := getg()
596 switchToCrashStack(func() {
597 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
598 g.m.traceback = 2
599 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
600 print("\n")
601
602 throw("morestack on g0")
603 })
604 }
605
606
607
608 func badmorestackgsignal() {
609 writeErrStr("fatal: morestack on gsignal\n")
610 }
611
612
613 func badctxt() {
614 throw("ctxt != 0")
615 }
616
617
618
619 var gcrash g
620
621 var crashingG atomic.Pointer[g]
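
// Switch to crashstack and call fn, with special handling of concurrent
// and recursive cases.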
631 func switchToCrashStack(fn func()) {
632 me := getg()
633 if crashingG.CompareAndSwapNoWB(nil, me) {
634 switchToCrashStack0(fn)
635 abort()
636 }
637 if crashingG.Load() == me {
638
639 writeErrStr("fatal: recursive switchToCrashStack\n")
640 abort()
641 }
642
643 usleep_no_g(100)
644 writeErrStr("fatal: concurrent switchToCrashStack\n")
645 abort()
646 }
647
648
649
650
651 const crashStackImplemented = GOOS != "windows"
652
653
654 func switchToCrashStack0(fn func())
655
656 func lockedOSThread() bool {
657 gp := getg()
658 return gp.lockedm != 0 && gp.m.lockedg != 0
659 }
660
661 var (
662
663
664
665
666
667
668 allglock mutex
669 allgs []*g
670
671
672
673
674
675
676
677
678
679
680
681
682
683 allglen uintptr
684 allgptr **g
685 )
686
687 func allgadd(gp *g) {
688 if readgstatus(gp) == _Gidle {
689 throw("allgadd: bad status Gidle")
690 }
691
692 lock(&allglock)
693 allgs = append(allgs, gp)
694 if &allgs[0] != allgptr {
695 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
696 }
697 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
698 unlock(&allglock)
699 }
700
701
702
703
704 func allGsSnapshot() []*g {
705 assertWorldStoppedOrLockHeld(&allglock)
706
707
708
709
710
711
712 return allgs[:len(allgs):len(allgs)]
713 }
714
715
716 func atomicAllG() (**g, uintptr) {
717 length := atomic.Loaduintptr(&allglen)
718 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
719 return ptr, length
720 }
721
722
723 func atomicAllGIndex(ptr **g, i uintptr) *g {
724 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
725 }
726
727
728
729
730 func forEachG(fn func(gp *g)) {
731 lock(&allglock)
732 for _, gp := range allgs {
733 fn(gp)
734 }
735 unlock(&allglock)
736 }
737
738
739
740
741
742 func forEachGRace(fn func(gp *g)) {
743 ptr, length := atomicAllG()
744 for i := uintptr(0); i < length; i++ {
745 gp := atomicAllGIndex(ptr, i)
746 fn(gp)
747 }
748 return
749 }
750
751 const (
752
753
754 _GoidCacheBatch = 16
755 )
756
757
758
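// cpuinit calls internal/cpu.Initialize with the GODEBUG value in env and
// copies the CPU feature flags the runtime cares about into package-level
// variables for the architecture-specific fast paths.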
759 func cpuinit(env string) {
760 cpu.Initialize(env)
761
762
763
764 switch GOARCH {
765 case "386", "amd64":
766 x86HasPOPCNT = cpu.X86.HasPOPCNT
767 x86HasSSE41 = cpu.X86.HasSSE41
768 x86HasFMA = cpu.X86.HasFMA
769
770 case "arm":
771 armHasVFPv4 = cpu.ARM.HasVFPv4
772
773 case "arm64":
774 arm64HasATOMICS = cpu.ARM64.HasATOMICS
775
776 case "loong64":
777 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
778 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
779 loong64HasLSX = cpu.Loong64.HasLSX
780
781 case "riscv64":
782 riscv64HasZbb = cpu.RISCV64.HasZbb
783 }
784 }
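
// getGodebugEarly extracts the environment variable GODEBUG from the
// environment on Unix-like operating systems and returns it, before the
// runtime's own copy of the environment has been set up. On platforms where
// this cannot be done this early, it reports false.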
791 func getGodebugEarly() (string, bool) {
792 const prefix = "GODEBUG="
793 var env string
794 switch GOOS {
795 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
796
797
798
799 n := int32(0)
800 for argv_index(argv, argc+1+n) != nil {
801 n++
802 }
803
804 for i := int32(0); i < n; i++ {
805 p := argv_index(argv, argc+1+i)
806 s := unsafe.String(p, findnull(p))
807
808 if stringslite.HasPrefix(s, prefix) {
809 env = gostringnocopy(p)[len(prefix):]
810 break
811 }
812 }
813 break
814
815 default:
816 return "", false
817 }
818 return env, true
819 }
820
829 func schedinit() {
830 lockInit(&sched.lock, lockRankSched)
831 lockInit(&sched.sysmonlock, lockRankSysmon)
832 lockInit(&sched.deferlock, lockRankDefer)
833 lockInit(&sched.sudoglock, lockRankSudog)
834 lockInit(&deadlock, lockRankDeadlock)
835 lockInit(&paniclk, lockRankPanic)
836 lockInit(&allglock, lockRankAllg)
837 lockInit(&allpLock, lockRankAllp)
838 lockInit(&reflectOffs.lock, lockRankReflectOffs)
839 lockInit(&finlock, lockRankFin)
840 lockInit(&cpuprof.lock, lockRankCpuprof)
841 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
842 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
843 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
844 traceLockInit()
845
846
847
848 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
849
850 lockVerifyMSize()
851
852 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
853
854
855
856 gp := getg()
857 if raceenabled {
858 gp.racectx, raceprocctx0 = raceinit()
859 }
860
861 sched.maxmcount = 10000
862 crashFD.Store(^uintptr(0))
863
864
865 worldStopped()
866
867 godebug, parsedGodebug := getGodebugEarly()
868 if parsedGodebug {
869 parseRuntimeDebugVars(godebug)
870 }
871 ticks.init()
872 moduledataverify()
873 stackinit()
874 randinit()
875 mallocinit()
876 cpuinit(godebug)
877 alginit()
878 mcommoninit(gp.m, -1)
879 modulesinit()
880 typelinksinit()
881 itabsinit()
882 stkobjinit()
883
884 sigsave(&gp.m.sigmask)
885 initSigmask = gp.m.sigmask
886
887 goargs()
888 goenvs()
889 secure()
890 checkfds()
891 if !parsedGodebug {
892
893
894 parseRuntimeDebugVars(gogetenv("GODEBUG"))
895 }
896 finishDebugVarsSetup()
897 gcinit()
898
899
900
901 gcrash.stack = stackalloc(16384)
902 gcrash.stackguard0 = gcrash.stack.lo + 1000
903 gcrash.stackguard1 = gcrash.stack.lo + 1000
904
905
906
907
908
909 if disableMemoryProfiling {
910 MemProfileRate = 0
911 }
912
913
914 mProfStackInit(gp.m)
915 defaultGOMAXPROCSInit()
916
917 lock(&sched.lock)
918 sched.lastpoll.Store(nanotime())
919 var procs int32
920 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
921 procs = int32(n)
922 sched.customGOMAXPROCS = true
923 } else {
924
925
926
927
928
929
930
931
932 procs = defaultGOMAXPROCS(numCPUStartup)
933 }
934 if procresize(procs) != nil {
935 throw("unknown runnable goroutine during bootstrap")
936 }
937 unlock(&sched.lock)
938
939
940 worldStarted()
941
942 if buildVersion == "" {
943
944
945 buildVersion = "unknown"
946 }
947 if len(modinfo) == 1 {
948
949
950 modinfo = ""
951 }
952 }
953
954 func dumpgstatus(gp *g) {
955 thisg := getg()
956 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
957 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
958 }
959
960
961 func checkmcount() {
962 assertLockHeld(&sched.lock)
963
964
965
966
967
968
969
970
971
972 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
973 if count > sched.maxmcount {
974 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
975 throw("thread exhaustion")
976 }
977 }
978
979
980
981
982
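// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.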
983 func mReserveID() int64 {
984 assertLockHeld(&sched.lock)
985
986 if sched.mnext+1 < sched.mnext {
987 throw("runtime: thread ID overflow")
988 }
989 id := sched.mnext
990 sched.mnext++
991 checkmcount()
992 return id
993 }
994
995
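// Pre-allocated ID may be passed as 'id', or omitted by passing -1.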
996 func mcommoninit(mp *m, id int64) {
997 gp := getg()
998
999
1000 if gp != gp.m.g0 {
1001 callers(1, mp.createstack[:])
1002 }
1003
1004 lock(&sched.lock)
1005
1006 if id >= 0 {
1007 mp.id = id
1008 } else {
1009 mp.id = mReserveID()
1010 }
1011
1012 mp.self = newMWeakPointer(mp)
1013
1014 mrandinit(mp)
1015
1016 mpreinit(mp)
1017 if mp.gsignal != nil {
1018 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1019 }
1020
1021
1022
1023 mp.alllink = allm
1024
1025
1026
1027 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1028 unlock(&sched.lock)
1029
1030
1031 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1032 mp.cgoCallers = new(cgoCallers)
1033 }
1034 mProfStackInit(mp)
1035 }
1036
1037
1038
1039
1040
1041 func mProfStackInit(mp *m) {
1042 if debug.profstackdepth == 0 {
1043
1044
1045 return
1046 }
1047 mp.profStack = makeProfStackFP()
1048 mp.mLockProfile.stack = makeProfStackFP()
1049 }
1050
1051
1052
1053
1054 func makeProfStackFP() []uintptr {
1055
1056
1057
1058
1059
1060
1061 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1062 }
1063
1064
1065
1066 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1067
1068
1069 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1070
1071 func (mp *m) becomeSpinning() {
1072 mp.spinning = true
1073 sched.nmspinning.Add(1)
1074 sched.needspinning.Store(0)
1075 }
1076
1077
1078
1079
1080
1081
1082
1083
1084 func (mp *m) snapshotAllp() []*p {
1085 mp.allpSnapshot = allp
1086 return mp.allpSnapshot
1087 }
1088
1089
1090
1091
1092
1093
1094
1095 func (mp *m) clearAllpSnapshot() {
1096 mp.allpSnapshot = nil
1097 }
1098
1099 func (mp *m) hasCgoOnStack() bool {
1100 return mp.ncgo > 0 || mp.isextra
1101 }
1102
1103 const (
1104
1105
1106 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1107
1108
1109
1110 osHasLowResClockInt = goos.IsWindows
1111
1112
1113
1114 osHasLowResClock = osHasLowResClockInt > 0
1115 )
1116
1117
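// Mark gp ready to run.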
1118 func ready(gp *g, traceskip int, next bool) {
1119 status := readgstatus(gp)
1120
1121
1122 mp := acquirem()
1123 if status&^_Gscan != _Gwaiting {
1124 dumpgstatus(gp)
1125 throw("bad g->status in ready")
1126 }
1127
1128
1129 trace := traceAcquire()
1130 casgstatus(gp, _Gwaiting, _Grunnable)
1131 if trace.ok() {
1132 trace.GoUnpark(gp, traceskip)
1133 traceRelease(trace)
1134 }
1135 runqput(mp.p.ptr(), gp, next)
1136 wakep()
1137 releasem(mp)
1138 }
1139
1140
1141
1142 const freezeStopWait = 0x7fffffff
1143
1144
1145
1146 var freezing atomic.Bool
1147
1148
1149
1150
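// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.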
1151 func freezetheworld() {
1152 freezing.Store(true)
1153 if debug.dontfreezetheworld > 0 {
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178 usleep(1000)
1179 return
1180 }
1181
1182
1183
1184
1185 for i := 0; i < 5; i++ {
1186
1187 sched.stopwait = freezeStopWait
1188 sched.gcwaiting.Store(true)
1189
1190 if !preemptall() {
1191 break
1192 }
1193 usleep(1000)
1194 }
1195
1196 usleep(1000)
1197 preemptall()
1198 usleep(1000)
1199 }
1200
1201
1202
1203
1204
1205 func readgstatus(gp *g) uint32 {
1206 return gp.atomicstatus.Load()
1207 }
1208
1209
1210
1211
1212
1213 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1214 success := false
1215
1216
1217 switch oldval {
1218 default:
1219 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1220 dumpgstatus(gp)
1221 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1222 case _Gscanrunnable,
1223 _Gscanwaiting,
1224 _Gscanrunning,
1225 _Gscansyscall,
1226 _Gscanleaked,
1227 _Gscanpreempted,
1228 _Gscandeadextra:
1229 if newval == oldval&^_Gscan {
1230 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1231 }
1232 }
1233 if !success {
1234 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1235 dumpgstatus(gp)
1236 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1237 }
1238 releaseLockRankAndM(lockRankGscan)
1239 }
1240
1241
1242
1243 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1244 switch oldval {
1245 case _Grunnable,
1246 _Grunning,
1247 _Gwaiting,
1248 _Gleaked,
1249 _Gsyscall,
1250 _Gdeadextra:
1251 if newval == oldval|_Gscan {
1252 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1253 if r {
1254 acquireLockRankAndM(lockRankGscan)
1255 }
1256 return r
1257
1258 }
1259 }
1260 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1261 throw("bad oldval passed to castogscanstatus")
1262 return false
1263 }
1264
1265
1266
1267 var casgstatusAlwaysTrack = false
1268
1269
1270
1271
1272
1273
1274
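// If asked to move to or from a Gscanstatus this will throw. Use the
// castogscanstatus and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the
// routine that put it in the Gscan state is finished.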
1275 func casgstatus(gp *g, oldval, newval uint32) {
1276 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1277 systemstack(func() {
1278
1279
1280 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1281 throw("casgstatus: bad incoming values")
1282 })
1283 }
1284
1285 lockWithRankMayAcquire(nil, lockRankGscan)
1286
1287
1288 const yieldDelay = 5 * 1000
1289 var nextYield int64
1290
1291
1292
1293 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1294 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1295 systemstack(func() {
1296
1297
1298 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1299 })
1300 }
1301 if i == 0 {
1302 nextYield = nanotime() + yieldDelay
1303 }
1304 if nanotime() < nextYield {
1305 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1306 procyield(1)
1307 }
1308 } else {
1309 osyield()
1310 nextYield = nanotime() + yieldDelay/2
1311 }
1312 }
1313
1314 if gp.bubble != nil {
1315 systemstack(func() {
1316 gp.bubble.changegstatus(gp, oldval, newval)
1317 })
1318 }
1319
1320 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1321
1322
1323 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1324 gp.tracking = true
1325 }
1326 gp.trackingSeq++
1327 }
1328 if !gp.tracking {
1329 return
1330 }
1331
1332
1333
1334
1335
1336
1337 switch oldval {
1338 case _Grunnable:
1339
1340
1341
1342 now := nanotime()
1343 gp.runnableTime += now - gp.trackingStamp
1344 gp.trackingStamp = 0
1345 case _Gwaiting:
1346 if !gp.waitreason.isMutexWait() {
1347
1348 break
1349 }
1350
1351
1352
1353
1354
1355 now := nanotime()
1356 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1357 gp.trackingStamp = 0
1358 }
1359 switch newval {
1360 case _Gwaiting:
1361 if !gp.waitreason.isMutexWait() {
1362
1363 break
1364 }
1365
1366 now := nanotime()
1367 gp.trackingStamp = now
1368 case _Grunnable:
1369
1370
1371 now := nanotime()
1372 gp.trackingStamp = now
1373 case _Grunning:
1374
1375
1376
1377 gp.tracking = false
1378 sched.timeToRun.record(gp.runnableTime)
1379 gp.runnableTime = 0
1380 }
1381 }
1382
1383
1384
1385
1386 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1387
1388 gp.waitreason = reason
1389 casgstatus(gp, old, _Gwaiting)
1390 }
1391
1392
1393
1394
1395
1396
1397
1398
1399 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1400 if !reason.isWaitingForSuspendG() {
1401 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1402 }
1403 casGToWaiting(gp, old, reason)
1404 }
1405
1406
1407
1408
1409
1410 func casGToPreemptScan(gp *g, old, new uint32) {
1411 if old != _Grunning || new != _Gscan|_Gpreempted {
1412 throw("bad g transition")
1413 }
1414 acquireLockRankAndM(lockRankGscan)
1415 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1416 }
1417
1418
1419
1420
1421
1422
1423 }
1424
1425
1426
1427
1428 func casGFromPreempted(gp *g, old, new uint32) bool {
1429 if old != _Gpreempted || new != _Gwaiting {
1430 throw("bad g transition")
1431 }
1432 gp.waitreason = waitReasonPreempted
1433 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1434 return false
1435 }
1436 if bubble := gp.bubble; bubble != nil {
1437 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1438 }
1439 return true
1440 }
1441
1442
1443 type stwReason uint8
1444
1445
1446
1447
1448 const (
1449 stwUnknown stwReason = iota
1450 stwGCMarkTerm
1451 stwGCSweepTerm
1452 stwWriteHeapDump
1453 stwGoroutineProfile
1454 stwGoroutineProfileCleanup
1455 stwAllGoroutinesStack
1456 stwReadMemStats
1457 stwAllThreadsSyscall
1458 stwGOMAXPROCS
1459 stwStartTrace
1460 stwStopTrace
1461 stwForTestCountPagesInUse
1462 stwForTestReadMetricsSlow
1463 stwForTestReadMemStatsSlow
1464 stwForTestPageCachePagesLeaked
1465 stwForTestResetDebugLog
1466 )
1467
1468 func (r stwReason) String() string {
1469 return stwReasonStrings[r]
1470 }
1471
1472 func (r stwReason) isGC() bool {
1473 return r == stwGCMarkTerm || r == stwGCSweepTerm
1474 }
1475
1476
1477
1478
1479 var stwReasonStrings = [...]string{
1480 stwUnknown: "unknown",
1481 stwGCMarkTerm: "GC mark termination",
1482 stwGCSweepTerm: "GC sweep termination",
1483 stwWriteHeapDump: "write heap dump",
1484 stwGoroutineProfile: "goroutine profile",
1485 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1486 stwAllGoroutinesStack: "all goroutines stack trace",
1487 stwReadMemStats: "read mem stats",
1488 stwAllThreadsSyscall: "AllThreadsSyscall",
1489 stwGOMAXPROCS: "GOMAXPROCS",
1490 stwStartTrace: "start trace",
1491 stwStopTrace: "stop trace",
1492 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1493 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1494 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1495 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1496 stwForTestResetDebugLog: "ResetDebugLog (test)",
1497 }
1498
1499
1500
1501 type worldStop struct {
1502 reason stwReason
1503 startedStopping int64
1504 finishedStopping int64
1505 stoppingCPUTime int64
1506 }
1507
1508
1509
1510
1511 var stopTheWorldContext worldStop
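
// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points and records reason as the reason for the stop.
// On return, only the current goroutine's P is running. stopTheWorld must not
// be called from a system stack and the caller must not hold worldsema. The
// caller must call startTheWorld when other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the same time. Each
// will execute its own stop, and the stops will be serialized.
//
// This is also used by routines that do stack dumps. If the system is in
// panic or being exited, this may not reliably stop all goroutines.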
1530 func stopTheWorld(reason stwReason) worldStop {
1531 semacquire(&worldsema)
1532 gp := getg()
1533 gp.m.preemptoff = reason.String()
1534 systemstack(func() {
1535 stopTheWorldContext = stopTheWorldWithSema(reason)
1536 })
1537 return stopTheWorldContext
1538 }
1539
1540
1541
1542
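// startTheWorld undoes the effects of stopTheWorld.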
1543 func startTheWorld(w worldStop) {
1544 systemstack(func() { startTheWorldWithSema(0, w) })
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 mp := acquirem()
1562 mp.preemptoff = ""
1563 semrelease1(&worldsema, true, 0)
1564 releasem(mp)
1565 }
1566
1567
1568
1569
1570 func stopTheWorldGC(reason stwReason) worldStop {
1571 semacquire(&gcsema)
1572 return stopTheWorld(reason)
1573 }
1574
1575
1576
1577
1578 func startTheWorldGC(w worldStop) {
1579 startTheWorld(w)
1580 semrelease(&gcsema)
1581 }
1582
1583
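// Holding worldsema grants an M the right to try to stop the world.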
1584 var worldsema uint32 = 1
1585
1586
1587
1588
1589
1590
1591
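// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done.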
1592 var gcsema uint32 = 1
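
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should stopTheWorldWithSema on the system
// stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.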
1626 func stopTheWorldWithSema(reason stwReason) worldStop {
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1640
1641 trace := traceAcquire()
1642 if trace.ok() {
1643 trace.STWStart(reason)
1644 traceRelease(trace)
1645 }
1646 gp := getg()
1647
1648
1649
1650 if gp.m.locks > 0 {
1651 throw("stopTheWorld: holding locks")
1652 }
1653
1654 lock(&sched.lock)
1655 start := nanotime()
1656 sched.stopwait = gomaxprocs
1657 sched.gcwaiting.Store(true)
1658 preemptall()
1659
1660
1661 gp.m.p.ptr().status = _Pgcstop
1662 gp.m.p.ptr().gcStopTime = start
1663 sched.stopwait--
1664
1665
1666 for _, pp := range allp {
1667 if thread, ok := setBlockOnExitSyscall(pp); ok {
1668 thread.gcstopP()
1669 thread.resume()
1670 }
1671 }
1672
1673
1674 now := nanotime()
1675 for {
1676 pp, _ := pidleget(now)
1677 if pp == nil {
1678 break
1679 }
1680 pp.status = _Pgcstop
1681 pp.gcStopTime = nanotime()
1682 sched.stopwait--
1683 }
1684 wait := sched.stopwait > 0
1685 unlock(&sched.lock)
1686
1687
1688 if wait {
1689 for {
1690
1691 if notetsleep(&sched.stopnote, 100*1000) {
1692 noteclear(&sched.stopnote)
1693 break
1694 }
1695 preemptall()
1696 }
1697 }
1698
1699 finish := nanotime()
1700 startTime := finish - start
1701 if reason.isGC() {
1702 sched.stwStoppingTimeGC.record(startTime)
1703 } else {
1704 sched.stwStoppingTimeOther.record(startTime)
1705 }
1706
1707
1708
1709
1710
1711 stoppingCPUTime := int64(0)
1712 bad := ""
1713 if sched.stopwait != 0 {
1714 bad = "stopTheWorld: not stopped (stopwait != 0)"
1715 } else {
1716 for _, pp := range allp {
1717 if pp.status != _Pgcstop {
1718 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1719 }
1720 if pp.gcStopTime == 0 && bad == "" {
1721 bad = "stopTheWorld: broken CPU time accounting"
1722 }
1723 stoppingCPUTime += finish - pp.gcStopTime
1724 pp.gcStopTime = 0
1725 }
1726 }
1727 if freezing.Load() {
1728
1729
1730
1731
1732 lock(&deadlock)
1733 lock(&deadlock)
1734 }
1735 if bad != "" {
1736 throw(bad)
1737 }
1738
1739 worldStopped()
1740
1741
1742 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1743
1744 return worldStop{
1745 reason: reason,
1746 startedStopping: start,
1747 finishedStopping: finish,
1748 stoppingCPUTime: stoppingCPUTime,
1749 }
1750 }
1751
1752
1753
1754
1755
1756
1757
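// startTheWorldWithSema is the core implementation of startTheWorld. The
// caller is responsible for acquiring worldsema. now is the current time;
// pass 0 to have the function read the clock itself. The returned value is
// the time at which the world was restarted.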
1758 func startTheWorldWithSema(now int64, w worldStop) int64 {
1759 assertWorldStopped()
1760
1761 mp := acquirem()
1762 if netpollinited() {
1763 list, delta := netpoll(0)
1764 injectglist(&list)
1765 netpollAdjustWaiters(delta)
1766 }
1767 lock(&sched.lock)
1768
1769 procs := gomaxprocs
1770 if newprocs != 0 {
1771 procs = newprocs
1772 newprocs = 0
1773 }
1774 p1 := procresize(procs)
1775 sched.gcwaiting.Store(false)
1776 if sched.sysmonwait.Load() {
1777 sched.sysmonwait.Store(false)
1778 notewakeup(&sched.sysmonnote)
1779 }
1780 unlock(&sched.lock)
1781
1782 worldStarted()
1783
1784 for p1 != nil {
1785 p := p1
1786 p1 = p1.link.ptr()
1787 if p.m != 0 {
1788 mp := p.m.ptr()
1789 p.m = 0
1790 if mp.nextp != 0 {
1791 throw("startTheWorld: inconsistent mp->nextp")
1792 }
1793 mp.nextp.set(p)
1794 notewakeup(&mp.park)
1795 } else {
1796
1797 newm(nil, p, -1)
1798 }
1799 }
1800
1801
1802 if now == 0 {
1803 now = nanotime()
1804 }
1805 totalTime := now - w.startedStopping
1806 if w.reason.isGC() {
1807 sched.stwTotalTimeGC.record(totalTime)
1808 } else {
1809 sched.stwTotalTimeOther.record(totalTime)
1810 }
1811 trace := traceAcquire()
1812 if trace.ok() {
1813 trace.STWDone()
1814 traceRelease(trace)
1815 }
1816
1817
1818
1819
1820 wakep()
1821
1822 releasem(mp)
1823
1824 return now
1825 }
1826
1827
1828
1829 func usesLibcall() bool {
1830 switch GOOS {
1831 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1832 return true
1833 }
1834 return false
1835 }
1836
1837
1838
1839 func mStackIsSystemAllocated() bool {
1840 switch GOOS {
1841 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1842 return true
1843 }
1844 return false
1845 }
1846
1847
1848
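// mstart is the entry-point for new Ms. It is written in assembly, uses ABI0,
// is marked TOPFRAME, and calls mstart0.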
1849 func mstart()
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
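// mstart0 is the Go entry-point for new Ms. It must not split the stack
// because the stack bounds may not be set up yet. It may run during STW
// (because it doesn't have a P yet), so write barriers are not allowed.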
1860 func mstart0() {
1861 gp := getg()
1862
1863 osStack := gp.stack.lo == 0
1864 if osStack {
1865
1866
1867
1868
1869
1870
1871
1872
1873 size := gp.stack.hi
1874 if size == 0 {
1875 size = 16384 * sys.StackGuardMultiplier
1876 }
1877 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1878 gp.stack.lo = gp.stack.hi - size + 1024
1879 }
1880
1881
1882 gp.stackguard0 = gp.stack.lo + stackGuard
1883
1884
1885 gp.stackguard1 = gp.stackguard0
1886 mstart1()
1887
1888
1889 if mStackIsSystemAllocated() {
1890
1891
1892
1893 osStack = true
1894 }
1895 mexit(osStack)
1896 }
1897
1898
1899
1900
1901
1902 func mstart1() {
1903 gp := getg()
1904
1905 if gp != gp.m.g0 {
1906 throw("bad runtime·mstart")
1907 }
1908
1909
1910
1911
1912
1913
1914
1915 gp.sched.g = guintptr(unsafe.Pointer(gp))
1916 gp.sched.pc = sys.GetCallerPC()
1917 gp.sched.sp = sys.GetCallerSP()
1918
1919 asminit()
1920 minit()
1921
1922
1923
1924 if gp.m == &m0 {
1925 mstartm0()
1926 }
1927
1928 if debug.dataindependenttiming == 1 {
1929 sys.EnableDIT()
1930 }
1931
1932 if fn := gp.m.mstartfn; fn != nil {
1933 fn()
1934 }
1935
1936 if gp.m != &m0 {
1937 acquirep(gp.m.nextp.ptr())
1938 gp.m.nextp = 0
1939 }
1940 schedule()
1941 }
1942
1943
1944
1945
1946
1947
1948
1949 func mstartm0() {
1950
1951
1952
1953 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1954 cgoHasExtraM = true
1955 newextram()
1956 }
1957 initsig(false)
1958 }
1959
1960
1961
1962
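// mPark causes a thread to park itself, returning once woken.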
1963 func mPark() {
1964 gp := getg()
1965 notesleep(&gp.m.park)
1966 noteclear(&gp.m.park)
1967 }
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
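// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.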
1979 func mexit(osStack bool) {
1980 mp := getg().m
1981
1982 if mp == &m0 {
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994 handoffp(releasep())
1995 lock(&sched.lock)
1996 sched.nmfreed++
1997 checkdead()
1998 unlock(&sched.lock)
1999 mPark()
2000 throw("locked m0 woke up")
2001 }
2002
2003 sigblock(true)
2004 unminit()
2005
2006
2007 if mp.gsignal != nil {
2008 stackfree(mp.gsignal.stack)
2009 if valgrindenabled {
2010 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2011 mp.gsignal.valgrindStackID = 0
2012 }
2013
2014
2015
2016
2017 mp.gsignal = nil
2018 }
2019
2020
2021 vgetrandomDestroy(mp)
2022
2023
2024
2025 mp.self.clear()
2026
2027
2028 lock(&sched.lock)
2029 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2030 if *pprev == mp {
2031 *pprev = mp.alllink
2032 goto found
2033 }
2034 }
2035 throw("m not found in allm")
2036 found:
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051 mp.freeWait.Store(freeMWait)
2052 mp.freelink = sched.freem
2053 sched.freem = mp
2054 unlock(&sched.lock)
2055
2056 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2057 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2058
2059
2060 handoffp(releasep())
2061
2062
2063
2064
2065
2066 lock(&sched.lock)
2067 sched.nmfreed++
2068 checkdead()
2069 unlock(&sched.lock)
2070
2071 if GOOS == "darwin" || GOOS == "ios" {
2072
2073
2074 if mp.signalPending.Load() != 0 {
2075 pendingPreemptSignals.Add(-1)
2076 }
2077 }
2078
2079
2080
2081 mdestroy(mp)
2082
2083 if osStack {
2084
2085 mp.freeWait.Store(freeMRef)
2086
2087
2088
2089 return
2090 }
2091
2092
2093
2094
2095
2096 exitThread(&mp.freeWait)
2097 }
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
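// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.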
2109 func forEachP(reason waitReason, fn func(*p)) {
2110 systemstack(func() {
2111 gp := getg().m.curg
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123 casGToWaitingForSuspendG(gp, _Grunning, reason)
2124 forEachPInternal(fn)
2125 casgstatus(gp, _Gwaiting, _Grunning)
2126 })
2127 }
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138 func forEachPInternal(fn func(*p)) {
2139 mp := acquirem()
2140 pp := getg().m.p.ptr()
2141
2142 lock(&sched.lock)
2143 if sched.safePointWait != 0 {
2144 throw("forEachP: sched.safePointWait != 0")
2145 }
2146 sched.safePointWait = gomaxprocs - 1
2147 sched.safePointFn = fn
2148
2149
2150 for _, p2 := range allp {
2151 if p2 != pp {
2152 atomic.Store(&p2.runSafePointFn, 1)
2153 }
2154 }
2155 preemptall()
2156
2157
2158
2159
2160
2161
2162
2163 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2164 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2165 fn(p)
2166 sched.safePointWait--
2167 }
2168 }
2169
2170 wait := sched.safePointWait > 0
2171 unlock(&sched.lock)
2172
2173
2174 fn(pp)
2175
2176
2177
2178 for _, p2 := range allp {
2179 if atomic.Load(&p2.runSafePointFn) != 1 {
2180
2181 continue
2182 }
2183 if thread, ok := setBlockOnExitSyscall(p2); ok {
2184 thread.takeP()
2185 thread.resume()
2186 handoffp(p2)
2187 }
2188 }
2189
2190
2191 if wait {
2192 for {
2193
2194
2195
2196
2197 if notetsleep(&sched.safePointNote, 100*1000) {
2198 noteclear(&sched.safePointNote)
2199 break
2200 }
2201 preemptall()
2202 }
2203 }
2204 if sched.safePointWait != 0 {
2205 throw("forEachP: not done")
2206 }
2207 for _, p2 := range allp {
2208 if p2.runSafePointFn != 0 {
2209 throw("forEachP: P did not run fn")
2210 }
2211 }
2212
2213 lock(&sched.lock)
2214 sched.safePointFn = nil
2215 unlock(&sched.lock)
2216 releasem(mp)
2217 }
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230 func runSafePointFn() {
2231 p := getg().m.p.ptr()
2232
2233
2234
2235 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2236 return
2237 }
2238 sched.safePointFn(p)
2239 lock(&sched.lock)
2240 sched.safePointWait--
2241 if sched.safePointWait == 0 {
2242 notewakeup(&sched.safePointNote)
2243 }
2244 unlock(&sched.lock)
2245 }
2246
2247
2248
2249
2250 var cgoThreadStart unsafe.Pointer
2251
2252 type cgothreadstart struct {
2253 g guintptr
2254 tls *uint64
2255 fn unsafe.Pointer
2256 }
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
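// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.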
2267 func allocm(pp *p, fn func(), id int64) *m {
2268 allocmLock.rlock()
2269
2270
2271
2272
2273 acquirem()
2274
2275 gp := getg()
2276 if gp.m.p == 0 {
2277 acquirep(pp)
2278 }
2279
2280
2281
2282 if sched.freem != nil {
2283 lock(&sched.lock)
2284 var newList *m
2285 for freem := sched.freem; freem != nil; {
2286
2287 wait := freem.freeWait.Load()
2288 if wait == freeMWait {
2289 next := freem.freelink
2290 freem.freelink = newList
2291 newList = freem
2292 freem = next
2293 continue
2294 }
2295
2296
2297
2298 if traceEnabled() || traceShuttingDown() {
2299 traceThreadDestroy(freem)
2300 }
2301
2302
2303
2304 if wait == freeMStack {
2305
2306
2307
2308 systemstack(func() {
2309 stackfree(freem.g0.stack)
2310 if valgrindenabled {
2311 valgrindDeregisterStack(freem.g0.valgrindStackID)
2312 freem.g0.valgrindStackID = 0
2313 }
2314 })
2315 }
2316 freem = freem.freelink
2317 }
2318 sched.freem = newList
2319 unlock(&sched.lock)
2320 }
2321
2322 mp := &new(mPadded).m
2323 mp.mstartfn = fn
2324 mcommoninit(mp, id)
2325
2326
2327
2328 if iscgo || mStackIsSystemAllocated() {
2329 mp.g0 = malg(-1)
2330 } else {
2331 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2332 }
2333 mp.g0.m = mp
2334
2335 if pp == gp.m.p.ptr() {
2336 releasep()
2337 }
2338
2339 releasem(gp.m)
2340 allocmLock.runlock()
2341 return mp
2342 }
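
// needm is called when a cgo callback happens on a thread without an m
// (a thread not created by the Go runtime). It takes an m off the extra-M
// list and installs it as the current m and g0 so the callback can run Go
// code. Because m and g are not yet set up on entry, needm is restricted
// in what it can call; in particular it cannot do anything that requires
// an m or that might schedule.
//
// The signal argument reports whether we are being called from a signal
// handler on a non-Go thread.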
2383 func needm(signal bool) {
2384 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2385
2386
2387
2388
2389
2390
2391 writeErrStr("fatal error: cgo callback before cgo call\n")
2392 exit(1)
2393 }
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 var sigmask sigset
2404 sigsave(&sigmask)
2405 sigblock(false)
2406
2407
2408
2409
2410 mp, last := getExtraM()
2411
2412
2413
2414
2415
2416
2417
2418
2419 mp.needextram = last
2420
2421
2422 mp.sigmask = sigmask
2423
2424
2425
2426 osSetupTLS(mp)
2427
2428
2429
2430 setg(mp.g0)
2431 sp := sys.GetCallerSP()
2432 callbackUpdateSystemStack(mp, sp, signal)
2433
2434
2435
2436
2437 mp.isExtraInC = false
2438
2439
2440 asminit()
2441 minit()
2442
2443
2444
2445
2446
2447
2448 var trace traceLocker
2449 if !signal {
2450 trace = traceAcquire()
2451 }
2452
2453
2454 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2455 sched.ngsys.Add(-1)
2456 sched.nGsyscallNoP.Add(1)
2457
2458 if !signal {
2459 if trace.ok() {
2460 trace.GoCreateSyscall(mp.curg)
2461 traceRelease(trace)
2462 }
2463 }
2464 mp.isExtraInSig = signal
2465 }
2466
2467
2468
2469
2470 func needAndBindM() {
2471 needm(false)
2472
2473 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2474 cgoBindM()
2475 }
2476 }
2477
2478
2479
2480
2481 func newextram() {
2482 c := extraMWaiters.Swap(0)
2483 if c > 0 {
2484 for i := uint32(0); i < c; i++ {
2485 oneNewExtraM()
2486 }
2487 } else if extraMLength.Load() == 0 {
2488
2489 oneNewExtraM()
2490 }
2491 }
2492
2493
2494 func oneNewExtraM() {
2495
2496
2497
2498
2499
2500 mp := allocm(nil, nil, -1)
2501 gp := malg(4096)
2502 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2503 gp.sched.sp = gp.stack.hi
2504 gp.sched.sp -= 4 * goarch.PtrSize
2505 gp.sched.lr = 0
2506 gp.sched.g = guintptr(unsafe.Pointer(gp))
2507 gp.syscallpc = gp.sched.pc
2508 gp.syscallsp = gp.sched.sp
2509 gp.stktopsp = gp.sched.sp
2510
2511
2512
2513 casgstatus(gp, _Gidle, _Gdeadextra)
2514 gp.m = mp
2515 mp.curg = gp
2516 mp.isextra = true
2517
2518 mp.isExtraInC = true
2519 mp.lockedInt++
2520 mp.lockedg.set(gp)
2521 gp.lockedm.set(mp)
2522 gp.goid = sched.goidgen.Add(1)
2523 if raceenabled {
2524 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2525 }
2526
2527 allgadd(gp)
2528
2529
2530
2531
2532
2533 sched.ngsys.Add(1)
2534
2535
2536 addExtraM(mp)
2537 }
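
// dropm puts the current m back onto the extra list.
//
// It is called after a cgo callback obtained its m via needm and is done
// executing Go code: the goroutine created for the callback is dead, so the
// m can be returned for reuse by a later callback or another thread. dropm
// also restores the signal mask that was saved by needm.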
2572 func dropm() {
2573
2574
2575
2576 mp := getg().m
2577
2578
2579
2580
2581
2582 var trace traceLocker
2583 if !mp.isExtraInSig {
2584 trace = traceAcquire()
2585 }
2586
2587
2588 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2589 mp.curg.preemptStop = false
2590 sched.ngsys.Add(1)
2591 sched.nGsyscallNoP.Add(-1)
2592
2593 if !mp.isExtraInSig {
2594 if trace.ok() {
2595 trace.GoDestroySyscall()
2596 traceRelease(trace)
2597 }
2598 }
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613 mp.syscalltick--
2614
2615
2616
2617 mp.curg.trace.reset()
2618
2619
2620
2621
2622 if traceEnabled() || traceShuttingDown() {
2623
2624
2625
2626
2627
2628
2629
2630 lock(&sched.lock)
2631 traceThreadDestroy(mp)
2632 unlock(&sched.lock)
2633 }
2634 mp.isExtraInSig = false
2635
2636
2637
2638
2639
2640 sigmask := mp.sigmask
2641 sigblock(false)
2642 unminit()
2643
2644 setg(nil)
2645
2646
2647
2648 g0 := mp.g0
2649 g0.stack.hi = 0
2650 g0.stack.lo = 0
2651 g0.stackguard0 = 0
2652 g0.stackguard1 = 0
2653 mp.g0StackAccurate = false
2654
2655 putExtraM(mp)
2656
2657 msigrestore(sigmask)
2658 }
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680 func cgoBindM() {
2681 if GOOS == "windows" || GOOS == "plan9" {
2682 fatal("bindm in unexpected GOOS")
2683 }
2684 g := getg()
2685 if g.m.g0 != g {
2686 fatal("the current g is not g0")
2687 }
2688 if _cgo_bindm != nil {
2689 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2690 }
2691 }
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704 func getm() uintptr {
2705 return uintptr(unsafe.Pointer(getg().m))
2706 }
2707
2708 var (
2709
2710
2711
2712
2713
2714
2715 extraM atomic.Uintptr
2716
2717 extraMLength atomic.Uint32
2718
2719 extraMWaiters atomic.Uint32
2720
2721
2722 extraMInUse atomic.Uint32
2723 )
2724
2725
2726
2727
2728
2729
2730
2731
2732 func lockextra(nilokay bool) *m {
2733 const locked = 1
2734
2735 incr := false
2736 for {
2737 old := extraM.Load()
2738 if old == locked {
2739 osyield_no_g()
2740 continue
2741 }
2742 if old == 0 && !nilokay {
2743 if !incr {
2744
2745
2746
2747 extraMWaiters.Add(1)
2748 incr = true
2749 }
2750 usleep_no_g(1)
2751 continue
2752 }
2753 if extraM.CompareAndSwap(old, locked) {
2754 return (*m)(unsafe.Pointer(old))
2755 }
2756 osyield_no_g()
2757 continue
2758 }
2759 }
2760
2761
2762 func unlockextra(mp *m, delta int32) {
2763 extraMLength.Add(delta)
2764 extraM.Store(uintptr(unsafe.Pointer(mp)))
2765 }
2766
2767
2768
2769
2770
2771
2772
2773
2774 func getExtraM() (mp *m, last bool) {
2775 mp = lockextra(false)
2776 extraMInUse.Add(1)
2777 unlockextra(mp.schedlink.ptr(), -1)
2778 return mp, mp.schedlink.ptr() == nil
2779 }
2780
2781
2782
2783
2784
2785 func putExtraM(mp *m) {
2786 extraMInUse.Add(-1)
2787 addExtraM(mp)
2788 }
2789
2790
2791
2792
2793 func addExtraM(mp *m) {
2794 mnext := lockextra(true)
2795 mp.schedlink.set(mnext)
2796 unlockextra(mp, 1)
2797 }
2798
2799 var (
2800
2801
2802
2803 allocmLock rwmutex
2804
2805
2806
2807
2808 execLock rwmutex
2809 )
2810
2811
2812
2813 const (
2814 failthreadcreate = "runtime: failed to create new OS thread\n"
2815 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2816 )
2817
2818
2819
2820
2821 var newmHandoff struct {
2822 lock mutex
2823
2824
2825
2826 newm muintptr
2827
2828
2829
2830 waiting bool
2831 wake note
2832
2833
2834
2835
2836 haveTemplateThread uint32
2837 }
2838
2839
2840
2841
2842
2843
2844
2845
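// Create a new m. It will start off with a call to fn, or the scheduler if
// fn is nil. fn needs to be static and not a heap allocated closure.
//
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.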
2846 func newm(fn func(), pp *p, id int64) {
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857 acquirem()
2858
2859 mp := allocm(pp, fn, id)
2860 mp.nextp.set(pp)
2861 mp.sigmask = initSigmask
2862 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874 lock(&newmHandoff.lock)
2875 if newmHandoff.haveTemplateThread == 0 {
2876 throw("on a locked thread with no template thread")
2877 }
2878 mp.schedlink = newmHandoff.newm
2879 newmHandoff.newm.set(mp)
2880 if newmHandoff.waiting {
2881 newmHandoff.waiting = false
2882 notewakeup(&newmHandoff.wake)
2883 }
2884 unlock(&newmHandoff.lock)
2885
2886
2887
2888 releasem(getg().m)
2889 return
2890 }
2891 newm1(mp)
2892 releasem(getg().m)
2893 }
2894
2895 func newm1(mp *m) {
2896 if iscgo {
2897 var ts cgothreadstart
2898 if _cgo_thread_start == nil {
2899 throw("_cgo_thread_start missing")
2900 }
2901 ts.g.set(mp.g0)
2902 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2903 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2904 if msanenabled {
2905 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2906 }
2907 if asanenabled {
2908 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2909 }
2910 execLock.rlock()
2911 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2912 execLock.runlock()
2913 return
2914 }
2915 execLock.rlock()
2916 newosproc(mp)
2917 execLock.runlock()
2918 }
2919
2920
2921
2922
2923
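// startTemplateThread starts the template thread if it is not already
// running.
//
// The calling thread must itself be in a known-good state.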
2924 func startTemplateThread() {
2925 if GOARCH == "wasm" {
2926 return
2927 }
2928
2929
2930
2931 mp := acquirem()
2932 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2933 releasem(mp)
2934 return
2935 }
2936 newm(templateThread, nil, -1)
2937 releasem(mp)
2938 }
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
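// templateThread is a thread in a known-good state that exists solely to
// start new threads in known-good states when the calling thread may not
// be in a good state.
//
// Many programs never need this, so templateThread is started lazily when
// we first enter a state that might lead to running on a thread in an
// unknown state.
//
// templateThread runs on an M without a P, so it must not have write
// barriers.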
2952 func templateThread() {
2953 lock(&sched.lock)
2954 sched.nmsys++
2955 checkdead()
2956 unlock(&sched.lock)
2957
2958 for {
2959 lock(&newmHandoff.lock)
2960 for newmHandoff.newm != 0 {
2961 newm := newmHandoff.newm.ptr()
2962 newmHandoff.newm = 0
2963 unlock(&newmHandoff.lock)
2964 for newm != nil {
2965 next := newm.schedlink.ptr()
2966 newm.schedlink = 0
2967 newm1(newm)
2968 newm = next
2969 }
2970 lock(&newmHandoff.lock)
2971 }
2972 newmHandoff.waiting = true
2973 noteclear(&newmHandoff.wake)
2974 unlock(&newmHandoff.lock)
2975 notesleep(&newmHandoff.wake)
2976 }
2977 }
2978
2979
2980
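// Stops execution of the current m until new work is available.
// Returns with acquired P.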
2981 func stopm() {
2982 gp := getg()
2983
2984 if gp.m.locks != 0 {
2985 throw("stopm holding locks")
2986 }
2987 if gp.m.p != 0 {
2988 throw("stopm holding p")
2989 }
2990 if gp.m.spinning {
2991 throw("stopm spinning")
2992 }
2993
2994 lock(&sched.lock)
2995 mput(gp.m)
2996 unlock(&sched.lock)
2997 mPark()
2998 acquirep(gp.m.nextp.ptr())
2999 gp.m.nextp = 0
3000 }
3001
3002 func mspinning() {
3003
3004 getg().m.spinning = true
3005 }
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
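// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide
// a P. startm will set m.spinning in the newly started M.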
3024 func startm(pp *p, spinning, lockheld bool) {
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041 mp := acquirem()
3042 if !lockheld {
3043 lock(&sched.lock)
3044 }
3045 if pp == nil {
3046 if spinning {
3047
3048
3049
3050 throw("startm: P required for spinning=true")
3051 }
3052 pp, _ = pidleget(0)
3053 if pp == nil {
3054 if !lockheld {
3055 unlock(&sched.lock)
3056 }
3057 releasem(mp)
3058 return
3059 }
3060 }
3061 nmp := mget()
3062 if nmp == nil {
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077 id := mReserveID()
3078 unlock(&sched.lock)
3079
3080 var fn func()
3081 if spinning {
3082
3083 fn = mspinning
3084 }
3085 newm(fn, pp, id)
3086
3087 if lockheld {
3088 lock(&sched.lock)
3089 }
3090
3091
3092 releasem(mp)
3093 return
3094 }
3095 if !lockheld {
3096 unlock(&sched.lock)
3097 }
3098 if nmp.spinning {
3099 throw("startm: m is spinning")
3100 }
3101 if nmp.nextp != 0 {
3102 throw("startm: m has p")
3103 }
3104 if spinning && !runqempty(pp) {
3105 throw("startm: p has runnable gs")
3106 }
3107
3108 nmp.spinning = spinning
3109 nmp.nextp.set(pp)
3110 notewakeup(&nmp.park)
3111
3112
3113 releasem(mp)
3114 }
3115
3116
3117
3118
3119
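// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.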
3120 func handoffp(pp *p) {
3121
3122
3123
3124
3125 if !runqempty(pp) || !sched.runq.empty() {
3126 startm(pp, false, false)
3127 return
3128 }
3129
3130 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3131 startm(pp, false, false)
3132 return
3133 }
3134
3135 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3136 startm(pp, false, false)
3137 return
3138 }
3139
3140
3141 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3142 sched.needspinning.Store(0)
3143 startm(pp, true, false)
3144 return
3145 }
3146 lock(&sched.lock)
3147 if sched.gcwaiting.Load() {
3148 pp.status = _Pgcstop
3149 pp.gcStopTime = nanotime()
3150 sched.stopwait--
3151 if sched.stopwait == 0 {
3152 notewakeup(&sched.stopnote)
3153 }
3154 unlock(&sched.lock)
3155 return
3156 }
3157 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3158 sched.safePointFn(pp)
3159 sched.safePointWait--
3160 if sched.safePointWait == 0 {
3161 notewakeup(&sched.safePointNote)
3162 }
3163 }
3164 if !sched.runq.empty() {
3165 unlock(&sched.lock)
3166 startm(pp, false, false)
3167 return
3168 }
3169
3170
3171 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3172 unlock(&sched.lock)
3173 startm(pp, false, false)
3174 return
3175 }
3176
3177
3178
3179 when := pp.timers.wakeTime()
3180 pidleput(pp, 0)
3181 unlock(&sched.lock)
3182
3183 if when != 0 {
3184 wakeNetPoller(when)
3185 }
3186 }
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
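// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).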
3201 func wakep() {
3202
3203
3204 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3205 return
3206 }
3207
3208
3209
3210
3211
3212
3213 mp := acquirem()
3214
3215 var pp *p
3216 lock(&sched.lock)
3217 pp, _ = pidlegetSpinning(0)
3218 if pp == nil {
3219 if sched.nmspinning.Add(-1) < 0 {
3220 throw("wakep: negative nmspinning")
3221 }
3222 unlock(&sched.lock)
3223 releasem(mp)
3224 return
3225 }
3226
3227
3228
3229
3230 unlock(&sched.lock)
3231
3232 startm(pp, true, false)
3233
3234 releasem(mp)
3235 }
3236
3237
3238
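// Stops execution of the current m that is locked to a g until the g is
// runnable again. Returns with acquired P.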
3239 func stoplockedm() {
3240 gp := getg()
3241
3242 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3243 throw("stoplockedm: inconsistent locking")
3244 }
3245 if gp.m.p != 0 {
3246
3247 pp := releasep()
3248 handoffp(pp)
3249 }
3250 incidlelocked(1)
3251
3252 mPark()
3253 status := readgstatus(gp.m.lockedg.ptr())
3254 if status&^_Gscan != _Grunnable {
3255 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3256 dumpgstatus(gp.m.lockedg.ptr())
3257 throw("stoplockedm: not runnable")
3258 }
3259 acquirep(gp.m.nextp.ptr())
3260 gp.m.nextp = 0
3261 }
3262
3263
3264
3265
3266
3267 func startlockedm(gp *g) {
3268 mp := gp.lockedm.ptr()
3269 if mp == getg().m {
3270 throw("startlockedm: locked to me")
3271 }
3272 if mp.nextp != 0 {
3273 throw("startlockedm: m has p")
3274 }
3275
3276 incidlelocked(-1)
3277 pp := releasep()
3278 mp.nextp.set(pp)
3279 notewakeup(&mp.park)
3280 stopm()
3281 }
3282
3283
3284
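// Stops the current m for stopTheWorld.
// Returns when the world is restarted.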
3285 func gcstopm() {
3286 gp := getg()
3287
3288 if !sched.gcwaiting.Load() {
3289 throw("gcstopm: not waiting for gc")
3290 }
3291 if gp.m.spinning {
3292 gp.m.spinning = false
3293
3294
3295 if sched.nmspinning.Add(-1) < 0 {
3296 throw("gcstopm: negative nmspinning")
3297 }
3298 }
3299 pp := releasep()
3300 lock(&sched.lock)
3301 pp.status = _Pgcstop
3302 pp.gcStopTime = nanotime()
3303 sched.stopwait--
3304 if sched.stopwait == 0 {
3305 notewakeup(&sched.stopnote)
3306 }
3307 unlock(&sched.lock)
3308 stopm()
3309 }
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
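// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.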
3320 func execute(gp *g, inheritTime bool) {
3321 mp := getg().m
3322
3323 if goroutineProfile.active {
3324
3325
3326
3327 tryRecordGoroutineProfile(gp, nil, osyield)
3328 }
3329
3330
3331 mp.curg = gp
3332 gp.m = mp
3333 gp.syncSafePoint = false
3334 casgstatus(gp, _Grunnable, _Grunning)
3335 gp.waitsince = 0
3336 gp.preempt = false
3337 gp.stackguard0 = gp.stack.lo + stackGuard
3338 if !inheritTime {
3339 mp.p.ptr().schedtick++
3340 }
3341
3342
3343 hz := sched.profilehz
3344 if mp.profilehz != hz {
3345 setThreadCPUProfiler(hz)
3346 }
3347
3348 trace := traceAcquire()
3349 if trace.ok() {
3350 trace.GoStart()
3351 traceRelease(trace)
3352 }
3353
3354 gogo(&gp.sched)
3355 }
3356
3357
3358
3359
3360
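// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll
// network. tryWakeP indicates that the returned goroutine is not normal
// (GC worker, trace reader) so the caller should try to wake a P.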
3361 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3362 mp := getg().m
3363
3364
3365
3366
3367
3368 top:
3369
3370
3371
3372 mp.clearAllpSnapshot()
3373
3374 pp := mp.p.ptr()
3375 if sched.gcwaiting.Load() {
3376 gcstopm()
3377 goto top
3378 }
3379 if pp.runSafePointFn != 0 {
3380 runSafePointFn()
3381 }
3382
3383
3384
3385
3386
3387 now, pollUntil, _ := pp.timers.check(0, nil)
3388
3389
3390 if traceEnabled() || traceShuttingDown() {
3391 gp := traceReader()
3392 if gp != nil {
3393 trace := traceAcquire()
3394 casgstatus(gp, _Gwaiting, _Grunnable)
3395 if trace.ok() {
3396 trace.GoUnpark(gp, 0)
3397 traceRelease(trace)
3398 }
3399 return gp, false, true
3400 }
3401 }
3402
3403
3404 if gcBlackenEnabled != 0 {
3405 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3406 if gp != nil {
3407 return gp, false, true
3408 }
3409 now = tnow
3410 }
3411
3412
3413
3414
3415 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3416 lock(&sched.lock)
3417 gp := globrunqget()
3418 unlock(&sched.lock)
3419 if gp != nil {
3420 return gp, false, false
3421 }
3422 }
3423
3424
3425 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3426 if gp := wakefing(); gp != nil {
3427 ready(gp, 0, true)
3428 }
3429 }
3430
3431
3432 if gcCleanups.needsWake() {
3433 gcCleanups.wake()
3434 }
3435
3436 if *cgo_yield != nil {
3437 asmcgocall(*cgo_yield, nil)
3438 }
3439
3440
3441 if gp, inheritTime := runqget(pp); gp != nil {
3442 return gp, inheritTime, false
3443 }
3444
3445
3446 if !sched.runq.empty() {
3447 lock(&sched.lock)
3448 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3449 unlock(&sched.lock)
3450 if gp != nil {
3451 if runqputbatch(pp, &q); !q.empty() {
3452 throw("Couldn't put Gs into empty local runq")
3453 }
3454 return gp, false, false
3455 }
3456 }
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3468 list, delta := netpoll(0)
3469 sched.pollingNet.Store(0)
3470 if !list.empty() {
3471 gp := list.pop()
3472 injectglist(&list)
3473 netpollAdjustWaiters(delta)
3474 trace := traceAcquire()
3475 casgstatus(gp, _Gwaiting, _Grunnable)
3476 if trace.ok() {
3477 trace.GoUnpark(gp, 0)
3478 traceRelease(trace)
3479 }
3480 return gp, false, false
3481 }
3482 }
3483
3484
3485
3486
3487
3488
3489 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3490 if !mp.spinning {
3491 mp.becomeSpinning()
3492 }
3493
3494 gp, inheritTime, tnow, w, newWork := stealWork(now)
3495 if gp != nil {
3496
3497 return gp, inheritTime, false
3498 }
3499 if newWork {
3500
3501
3502 goto top
3503 }
3504
3505 now = tnow
3506 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3507
3508 pollUntil = w
3509 }
3510 }
3511
3512
3513
3514
3515
3516 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3517 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3518 if node != nil {
3519 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3520 gp := node.gp.ptr()
3521
3522 trace := traceAcquire()
3523 casgstatus(gp, _Gwaiting, _Grunnable)
3524 if trace.ok() {
3525 trace.GoUnpark(gp, 0)
3526 traceRelease(trace)
3527 }
3528 return gp, false, false
3529 }
3530 gcController.removeIdleMarkWorker()
3531 }
3532
3533
3534
3535
3536
3537 gp, otherReady := beforeIdle(now, pollUntil)
3538 if gp != nil {
3539 trace := traceAcquire()
3540 casgstatus(gp, _Gwaiting, _Grunnable)
3541 if trace.ok() {
3542 trace.GoUnpark(gp, 0)
3543 traceRelease(trace)
3544 }
3545 return gp, false, false
3546 }
3547 if otherReady {
3548 goto top
3549 }
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559 allpSnapshot := mp.snapshotAllp()
3560
3561
3562 idlepMaskSnapshot := idlepMask
3563 timerpMaskSnapshot := timerpMask
3564
3565
3566 lock(&sched.lock)
3567 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3568 unlock(&sched.lock)
3569 goto top
3570 }
3571 if !sched.runq.empty() {
3572 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3573 unlock(&sched.lock)
3574 if gp == nil {
3575 throw("global runq empty with non-zero runqsize")
3576 }
3577 if runqputbatch(pp, &q); !q.empty() {
3578 throw("Couldn't put Gs into empty local runq")
3579 }
3580 return gp, false, false
3581 }
3582 if !mp.spinning && sched.needspinning.Load() == 1 {
3583
3584 mp.becomeSpinning()
3585 unlock(&sched.lock)
3586 goto top
3587 }
3588 if releasep() != pp {
3589 throw("findrunnable: wrong p")
3590 }
3591 now = pidleput(pp, now)
3592 unlock(&sched.lock)
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
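// Delicate dance: the thread transitions from spinning to non-spinning
// state, potentially concurrently with submission of new work. We must
// drop nmspinning first and then check all sources again. If we do it
// the other way around, another thread can submit work after we've
// checked all sources but before we drop nmspinning, and nobody will
// unpark a thread to run the work.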
3630 wasSpinning := mp.spinning
3631 if mp.spinning {
3632 mp.spinning = false
3633 if sched.nmspinning.Add(-1) < 0 {
3634 throw("findrunnable: negative nmspinning")
3635 }
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648 lock(&sched.lock)
3649 if !sched.runq.empty() {
3650 pp, _ := pidlegetSpinning(0)
3651 if pp != nil {
3652 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3653 unlock(&sched.lock)
3654 if gp == nil {
3655 throw("global runq empty with non-zero runqsize")
3656 }
3657 if runqputbatch(pp, &q); !q.empty() {
3658 throw("Couldn't put Gs into empty local runq")
3659 }
3660 acquirep(pp)
3661 mp.becomeSpinning()
3662 return gp, false, false
3663 }
3664 }
3665 unlock(&sched.lock)
3666
3667 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3668 if pp != nil {
3669 acquirep(pp)
3670 mp.becomeSpinning()
3671 goto top
3672 }
3673
3674
3675 pp, gp := checkIdleGCNoP()
3676 if pp != nil {
3677 acquirep(pp)
3678 mp.becomeSpinning()
3679
3680
3681 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3682 trace := traceAcquire()
3683 casgstatus(gp, _Gwaiting, _Grunnable)
3684 if trace.ok() {
3685 trace.GoUnpark(gp, 0)
3686 traceRelease(trace)
3687 }
3688 return gp, false, false
3689 }
3690
3691
3692
3693
3694
3695
3696
3697 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3698 }
3699
3700
3701
3702
3703
3704 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3705 sched.pollUntil.Store(pollUntil)
3706 if mp.p != 0 {
3707 throw("findrunnable: netpoll with p")
3708 }
3709 if mp.spinning {
3710 throw("findrunnable: netpoll with spinning")
3711 }
3712 delay := int64(-1)
3713 if pollUntil != 0 {
3714 if now == 0 {
3715 now = nanotime()
3716 }
3717 delay = pollUntil - now
3718 if delay < 0 {
3719 delay = 0
3720 }
3721 }
3722 if faketime != 0 {
3723
3724 delay = 0
3725 }
3726 list, delta := netpoll(delay)
3727
3728 now = nanotime()
3729 sched.pollUntil.Store(0)
3730 sched.lastpoll.Store(now)
3731 if faketime != 0 && list.empty() {
3732
3733
3734 stopm()
3735 goto top
3736 }
3737 lock(&sched.lock)
3738 pp, _ := pidleget(now)
3739 unlock(&sched.lock)
3740 if pp == nil {
3741 injectglist(&list)
3742 netpollAdjustWaiters(delta)
3743 } else {
3744 acquirep(pp)
3745 if !list.empty() {
3746 gp := list.pop()
3747 injectglist(&list)
3748 netpollAdjustWaiters(delta)
3749 trace := traceAcquire()
3750 casgstatus(gp, _Gwaiting, _Grunnable)
3751 if trace.ok() {
3752 trace.GoUnpark(gp, 0)
3753 traceRelease(trace)
3754 }
3755 return gp, false, false
3756 }
3757 if wasSpinning {
3758 mp.becomeSpinning()
3759 }
3760 goto top
3761 }
3762 } else if pollUntil != 0 && netpollinited() {
3763 pollerPollUntil := sched.pollUntil.Load()
3764 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3765 netpollBreak()
3766 }
3767 }
3768 stopm()
3769 goto top
3770 }
3771
3772
3773
3774
3775
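// pollWork reports whether there is non-background work this P could
// be doing. This is a fairly lightweight check to be used for
// background work loops, like idle GC. It checks a subset of the
// conditions checked by the actual scheduler.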
3776 func pollWork() bool {
3777 if !sched.runq.empty() {
3778 return true
3779 }
3780 p := getg().m.p.ptr()
3781 if !runqempty(p) {
3782 return true
3783 }
3784 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3785 if list, delta := netpoll(0); !list.empty() {
3786 injectglist(&list)
3787 netpollAdjustWaiters(delta)
3788 return true
3789 }
3790 }
3791 return false
3792 }
3793
3794
3795
3796
3797
3798
3799
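// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed
// time or the current time if now was passed as 0.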
3800 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3801 pp := getg().m.p.ptr()
3802
3803 ranTimer := false
3804
3805 const stealTries = 4
3806 for i := 0; i < stealTries; i++ {
3807 stealTimersOrRunNextG := i == stealTries-1
3808
3809 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3810 if sched.gcwaiting.Load() {
3811
3812 return nil, false, now, pollUntil, true
3813 }
3814 p2 := allp[enum.position()]
3815 if pp == p2 {
3816 continue
3817 }
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3833 tnow, w, ran := p2.timers.check(now, nil)
3834 now = tnow
3835 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3836 pollUntil = w
3837 }
3838 if ran {
3839
3840
3841
3842
3843
3844
3845
3846
3847 if gp, inheritTime := runqget(pp); gp != nil {
3848 return gp, inheritTime, now, pollUntil, ranTimer
3849 }
3850 ranTimer = true
3851 }
3852 }
3853
3854
3855 if !idlepMask.read(enum.position()) {
3856 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3857 return gp, false, now, pollUntil, ranTimer
3858 }
3859 }
3860 }
3861 }
3862
3863
3864
3865
3866 return nil, false, now, pollUntil, ranTimer
3867 }
3868
3869
3870
3871
3872
3873
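// checkRunqsNoP checks all Ps for a runnable G to steal.
//
// On entry we have no P. If another P's run queue is non-empty and an
// idle P can be taken, that P is returned; the caller acquires it and
// retries findRunnable. Otherwise returns nil.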
3874 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3875 for id, p2 := range allpSnapshot {
3876 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3877 lock(&sched.lock)
3878 pp, _ := pidlegetSpinning(0)
3879 if pp == nil {
3880
3881 unlock(&sched.lock)
3882 return nil
3883 }
3884 unlock(&sched.lock)
3885 return pp
3886 }
3887 }
3888
3889
3890 return nil
3891 }
3892
3893
3894
3895
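// checkTimersNoP checks all Ps for a timer expiring sooner than pollUntil.
//
// Returns the updated pollUntil value.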
3896 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3897 for id, p2 := range allpSnapshot {
3898 if timerpMaskSnapshot.read(uint32(id)) {
3899 w := p2.timers.wakeTime()
3900 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3901 pollUntil = w
3902 }
3903 }
3904 }
3905
3906 return pollUntil
3907 }
3908
3909
3910
3911
3912
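// checkIdleGCNoP checks for idle-priority GC work without a P on entry.
//
// If another idle-priority mark worker is needed and a P is available,
// returns the P and the worker G to run on it.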
3913 func checkIdleGCNoP() (*p, *g) {
3914
3915
3916
3917
3918
3919
3920 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3921 return nil, nil
3922 }
3923 if !gcShouldScheduleWorker(nil) {
3924 return nil, nil
3925 }
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944 lock(&sched.lock)
3945 pp, now := pidlegetSpinning(0)
3946 if pp == nil {
3947 unlock(&sched.lock)
3948 return nil, nil
3949 }
3950
3951
3952 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3953 pidleput(pp, now)
3954 unlock(&sched.lock)
3955 return nil, nil
3956 }
3957
3958 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3959 if node == nil {
3960 pidleput(pp, now)
3961 unlock(&sched.lock)
3962 gcController.removeIdleMarkWorker()
3963 return nil, nil
3964 }
3965
3966 unlock(&sched.lock)
3967
3968 return pp, node.gp.ptr()
3969 }
3970
3971
3972
3973
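// wakeNetPoller wakes up the thread sleeping in the network poller if it
// isn't going to wake up before the when argument; or it wakes an idle P
// to service timers and the network poller if there isn't one already.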
3974 func wakeNetPoller(when int64) {
3975 if sched.lastpoll.Load() == 0 {
3976
3977
3978
3979
3980 pollerPollUntil := sched.pollUntil.Load()
3981 if pollerPollUntil == 0 || pollerPollUntil > when {
3982 netpollBreak()
3983 }
3984 } else {
3985
3986
3987 if GOOS != "plan9" {
3988 wakep()
3989 }
3990 }
3991 }
3992
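// resetspinning clears the spinning state of the current M and then wakes
// another P if there may still be work, since the nmspinning decrement can
// race with new work being submitted.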
3993 func resetspinning() {
3994 gp := getg()
3995 if !gp.m.spinning {
3996 throw("resetspinning: not a spinning m")
3997 }
3998 gp.m.spinning = false
3999 nmspinning := sched.nmspinning.Add(-1)
4000 if nmspinning < 0 {
4001 throw("findrunnable: negative nmspinning")
4002 }
4003
4004
4005
4006 wakep()
4007 }
4008
4009
4010
4011
4012
4013
4014
4015
4016
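// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to npidle M's are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.
// Can run concurrently with GC.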
4017 func injectglist(glist *gList) {
4018 if glist.empty() {
4019 return
4020 }
4021
4022
4023
4024 var tail *g
4025 trace := traceAcquire()
4026 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4027 tail = gp
4028 casgstatus(gp, _Gwaiting, _Grunnable)
4029 if trace.ok() {
4030 trace.GoUnpark(gp, 0)
4031 }
4032 }
4033 if trace.ok() {
4034 traceRelease(trace)
4035 }
4036
4037
4038 q := gQueue{glist.head, tail.guintptr(), glist.size}
4039 *glist = gList{}
4040
4041 startIdle := func(n int32) {
4042 for ; n > 0; n-- {
4043 mp := acquirem()
4044 lock(&sched.lock)
4045
4046 pp, _ := pidlegetSpinning(0)
4047 if pp == nil {
4048 unlock(&sched.lock)
4049 releasem(mp)
4050 break
4051 }
4052
4053 startm(pp, false, true)
4054 unlock(&sched.lock)
4055 releasem(mp)
4056 }
4057 }
4058
4059 pp := getg().m.p.ptr()
4060 if pp == nil {
4061 n := q.size
4062 lock(&sched.lock)
4063 globrunqputbatch(&q)
4064 unlock(&sched.lock)
4065 startIdle(n)
4066 return
4067 }
4068
4069 var globq gQueue
4070 npidle := sched.npidle.Load()
4071 for ; npidle > 0 && !q.empty(); npidle-- {
4072 g := q.pop()
4073 globq.pushBack(g)
4074 }
4075 if !globq.empty() {
4076 n := globq.size
4077 lock(&sched.lock)
4078 globrunqputbatch(&globq)
4079 unlock(&sched.lock)
4080 startIdle(n)
4081 }
4082
4083 if runqputbatch(pp, &q); !q.empty() {
4084 lock(&sched.lock)
4085 globrunqputbatch(&q)
4086 unlock(&sched.lock)
4087 }
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102 wakep()
4103 }
4104
4105
4106
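// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.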
4107 func schedule() {
4108 mp := getg().m
4109
4110 if mp.locks != 0 {
4111 throw("schedule: holding locks")
4112 }
4113
4114 if mp.lockedg != 0 {
4115 stoplockedm()
4116 execute(mp.lockedg.ptr(), false)
4117 }
4118
4119
4120
4121 if mp.incgo {
4122 throw("schedule: in cgo")
4123 }
4124
4125 top:
4126 pp := mp.p.ptr()
4127 pp.preempt = false
4128
4129
4130
4131
4132 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4133 throw("schedule: spinning with local work")
4134 }
4135
4136 gp, inheritTime, tryWakeP := findRunnable()
4137
4138
4139
4140
4141 mp.clearAllpSnapshot()
4142
4143 if debug.dontfreezetheworld > 0 && freezing.Load() {
4144
4145
4146
4147
4148
4149
4150
4151 lock(&deadlock)
4152 lock(&deadlock)
4153 }
4154
4155
4156
4157
4158 if mp.spinning {
4159 resetspinning()
4160 }
4161
4162 if sched.disable.user && !schedEnabled(gp) {
4163
4164
4165
4166 lock(&sched.lock)
4167 if schedEnabled(gp) {
4168
4169
4170 unlock(&sched.lock)
4171 } else {
4172 sched.disable.runnable.pushBack(gp)
4173 unlock(&sched.lock)
4174 goto top
4175 }
4176 }
4177
4178
4179
4180 if tryWakeP {
4181 wakep()
4182 }
4183 if gp.lockedm != 0 {
4184
4185
4186 startlockedm(gp)
4187 goto top
4188 }
4189
4190 execute(gp, inheritTime)
4191 }
4192
4193
4194
4195
4196
4197
4198
4199
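// dropg removes the association between m and the current goroutine
// m->curg (gp for short). Typically a caller sets gp's status away from
// Grunning and then immediately calls dropg to finish the job. The caller
// is also responsible for arranging that gp will be restarted using ready
// at an appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should call
// schedule to restart the scheduling of goroutines on this m.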
4200 func dropg() {
4201 gp := getg()
4202
4203 setMNoWB(&gp.m.curg.m, nil)
4204 setGNoWB(&gp.m.curg, nil)
4205 }
4206
4207 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4208 unlock((*mutex)(lock))
4209 return true
4210 }
4211
4212
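// park_m is the gopark continuation on g0: it parks gp, runs the
// wait-unlock function if any, and schedules another goroutine.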
4213 func park_m(gp *g) {
4214 mp := getg().m
4215
4216 trace := traceAcquire()
4217
4218
4219
4220
4221
4222 bubble := gp.bubble
4223 if bubble != nil {
4224 bubble.incActive()
4225 }
4226
4227 if trace.ok() {
4228
4229
4230
4231 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4232 }
4233
4234
4235 casgstatus(gp, _Grunning, _Gwaiting)
4236 if trace.ok() {
4237 traceRelease(trace)
4238 }
4239
4240 dropg()
4241
4242 if fn := mp.waitunlockf; fn != nil {
4243 ok := fn(gp, mp.waitlock)
4244 mp.waitunlockf = nil
4245 mp.waitlock = nil
4246 if !ok {
4247 trace := traceAcquire()
4248 casgstatus(gp, _Gwaiting, _Grunnable)
4249 if bubble != nil {
4250 bubble.decActive()
4251 }
4252 if trace.ok() {
4253 trace.GoUnpark(gp, 2)
4254 traceRelease(trace)
4255 }
4256 execute(gp, true)
4257 }
4258 }
4259
4260 if bubble != nil {
4261 bubble.decActive()
4262 }
4263
4264 schedule()
4265 }
4266
4267 func goschedImpl(gp *g, preempted bool) {
4268 pp := gp.m.p.ptr()
4269 trace := traceAcquire()
4270 status := readgstatus(gp)
4271 if status&^_Gscan != _Grunning {
4272 dumpgstatus(gp)
4273 throw("bad g status")
4274 }
4275 if trace.ok() {
4276
4277
4278
4279 if preempted {
4280 trace.GoPreempt()
4281 } else {
4282 trace.GoSched()
4283 }
4284 }
4285 casgstatus(gp, _Grunning, _Grunnable)
4286 if trace.ok() {
4287 traceRelease(trace)
4288 }
4289
4290 dropg()
4291 if preempted && sched.gcwaiting.Load() {
4292
4293
4294 runqput(pp, gp, true)
4295 } else {
4296 lock(&sched.lock)
4297 globrunqput(gp)
4298 unlock(&sched.lock)
4299 }
4300
4301 if mainStarted {
4302 wakep()
4303 }
4304
4305 schedule()
4306 }
4307
4308
4309 func gosched_m(gp *g) {
4310 goschedImpl(gp, false)
4311 }
4312
4313
4314 func goschedguarded_m(gp *g) {
4315 if !canPreemptM(gp.m) {
4316 gogo(&gp.sched)
4317 }
4318 goschedImpl(gp, false)
4319 }
4320
4321 func gopreempt_m(gp *g) {
4322 goschedImpl(gp, true)
4323 }
4324
4325
4326
4327
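// preemptPark parks gp and puts it in _Gpreempted.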
4328 func preemptPark(gp *g) {
4329 status := readgstatus(gp)
4330 if status&^_Gscan != _Grunning {
4331 dumpgstatus(gp)
4332 throw("bad g status")
4333 }
4334
4335 if gp.asyncSafePoint {
4336
4337
4338
4339 f := findfunc(gp.sched.pc)
4340 if !f.valid() {
4341 throw("preempt at unknown pc")
4342 }
4343 if f.flag&abi.FuncFlagSPWrite != 0 {
4344 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4345 throw("preempt SPWRITE")
4346 }
4347 }
4348
4349
4350
4351
4352
4353
4354
4355 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4356 dropg()
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378 trace := traceAcquire()
4379 if trace.ok() {
4380 trace.GoPark(traceBlockPreempted, 0)
4381 }
4382 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4383 if trace.ok() {
4384 traceRelease(trace)
4385 }
4386 schedule()
4387 }
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
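// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq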
4403 func goyield() {
4404 checkTimeouts()
4405 mcall(goyield_m)
4406 }
4407
4408 func goyield_m(gp *g) {
4409 trace := traceAcquire()
4410 pp := gp.m.p.ptr()
4411 if trace.ok() {
4412
4413
4414
4415 trace.GoPreempt()
4416 }
4417 casgstatus(gp, _Grunning, _Grunnable)
4418 if trace.ok() {
4419 traceRelease(trace)
4420 }
4421 dropg()
4422 runqput(pp, gp, false)
4423 schedule()
4424 }
4425
4426
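// goexit1 finishes execution of the current goroutine.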
4427 func goexit1() {
4428 if raceenabled {
4429 if gp := getg(); gp.bubble != nil {
4430 racereleasemergeg(gp, gp.bubble.raceaddr())
4431 }
4432 racegoend()
4433 }
4434 trace := traceAcquire()
4435 if trace.ok() {
4436 trace.GoEnd()
4437 traceRelease(trace)
4438 }
4439 mcall(goexit0)
4440 }
4441
4442
4443 func goexit0(gp *g) {
4444 gdestroy(gp)
4445 schedule()
4446 }
4447
4448 func gdestroy(gp *g) {
4449 mp := getg().m
4450 pp := mp.p.ptr()
4451
4452 casgstatus(gp, _Grunning, _Gdead)
4453 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4454 if isSystemGoroutine(gp, false) {
4455 sched.ngsys.Add(-1)
4456 }
4457 gp.m = nil
4458 locked := gp.lockedm != 0
4459 gp.lockedm = 0
4460 mp.lockedg = 0
4461 gp.preemptStop = false
4462 gp.paniconfault = false
4463 gp._defer = nil
4464 gp._panic = nil
4465 gp.writebuf = nil
4466 gp.waitreason = waitReasonZero
4467 gp.param = nil
4468 gp.labels = nil
4469 gp.timer = nil
4470 gp.bubble = nil
4471
4472 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4473
4474
4475
4476 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4477 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4478 gcController.bgScanCredit.Add(scanCredit)
4479 gp.gcAssistBytes = 0
4480 }
4481
4482 dropg()
4483
4484 if GOARCH == "wasm" {
4485 gfput(pp, gp)
4486 return
4487 }
4488
4489 if locked && mp.lockedInt != 0 {
4490 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4491 if mp.isextra {
4492 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4493 }
4494 throw("exited a goroutine internally locked to the OS thread")
4495 }
4496 gfput(pp, gp)
4497 if locked {
4498
4499
4500
4501
4502
4503
4504 if GOOS != "plan9" {
4505 gogo(&mp.g0.sched)
4506 } else {
4507
4508
4509 mp.lockedExt = 0
4510 }
4511 }
4512 }
4513
4514
4515
4516
4517
4518
4519
4520
4521
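// save updates getg().sched to refer to pc, sp, and bp so that a
// following gogo will restore pc, sp, and bp.
//
// save must not have write barriers because invoking a write barrier
// can clobber getg().sched.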
4522 func save(pc, sp, bp uintptr) {
4523 gp := getg()
4524
4525 if gp == gp.m.g0 || gp == gp.m.gsignal {
4526
4527
4528
4529
4530
4531 throw("save on system g not allowed")
4532 }
4533
4534 gp.sched.pc = pc
4535 gp.sched.sp = sp
4536 gp.sched.lr = 0
4537 gp.sched.bp = bp
4538
4539
4540
4541 if gp.sched.ctxt != nil {
4542 badctxt()
4543 }
4544 }
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
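// reentersyscall records that the goroutine is about to enter a system
// call and is no longer using the CPU: it saves the caller's pc/sp/bp,
// moves the goroutine to _Gsyscall, and arranges for the P to be retaken
// if the call blocks. It is called only from the go syscall library and
// cgocall, not from the low-level system calls used by the runtime.
// The registers are re-saved after every step that could have clobbered
// g.sched, since the caller returns to the saved state immediately.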
4570 func reentersyscall(pc, sp, bp uintptr) {
4571 gp := getg()
4572
4573
4574
4575 gp.m.locks++
4576
4577
4578
4579
4580
4581 gp.stackguard0 = stackPreempt
4582 gp.throwsplit = true
4583
4584
4585 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4586
4587 pp := gp.m.p.ptr()
4588 if pp.runSafePointFn != 0 {
4589
4590 systemstack(runSafePointFn)
4591 }
4592 gp.m.oldp.set(pp)
4593
4594
4595 save(pc, sp, bp)
4596 gp.syscallsp = sp
4597 gp.syscallpc = pc
4598 gp.syscallbp = bp
4599
4600
4601 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4602 systemstack(func() {
4603 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4604 throw("entersyscall")
4605 })
4606 }
4607 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4608 systemstack(func() {
4609 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4610 throw("entersyscall")
4611 })
4612 }
4613 trace := traceAcquire()
4614 if trace.ok() {
4615
4616
4617
4618
4619 systemstack(func() {
4620 trace.GoSysCall()
4621 })
4622
4623 save(pc, sp, bp)
4624 }
4625 if sched.gcwaiting.Load() {
4626
4627
4628
4629 systemstack(func() {
4630 entersyscallHandleGCWait(trace)
4631 })
4632
4633 save(pc, sp, bp)
4634 }
4635
4636
4637
4638
4639
4640 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4641 casgstatus(gp, _Grunning, _Gsyscall)
4642 }
4643 if staticLockRanking {
4644
4645 save(pc, sp, bp)
4646 }
4647 if trace.ok() {
4648
4649
4650
4651 traceRelease(trace)
4652 }
4653 if sched.sysmonwait.Load() {
4654 systemstack(entersyscallWakeSysmon)
4655
4656 save(pc, sp, bp)
4657 }
4658 gp.m.locks--
4659 }
4660
4661
4662
4663
4664 const debugExtendGrunningNoP = false
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
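// entersyscall is the standard syscall entry used by the go syscall
// library and normal cgo calls. It must not grow the stack, so it
// captures the caller's pc/sp/frame pointer itself and delegates to
// reentersyscall.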
4680 func entersyscall() {
4681
4682
4683
4684
4685 fp := getcallerfp()
4686 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4687 }
4688
4689 func entersyscallWakeSysmon() {
4690 lock(&sched.lock)
4691 if sched.sysmonwait.Load() {
4692 sched.sysmonwait.Store(false)
4693 notewakeup(&sched.sysmonnote)
4694 }
4695 unlock(&sched.lock)
4696 }
4697
4698 func entersyscallHandleGCWait(trace traceLocker) {
4699 gp := getg()
4700
4701 lock(&sched.lock)
4702 if sched.stopwait > 0 {
4703
4704 pp := gp.m.p.ptr()
4705 pp.m = 0
4706 gp.m.p = 0
4707 atomic.Store(&pp.status, _Pgcstop)
4708
4709 if trace.ok() {
4710 trace.ProcStop(pp)
4711 }
4712 sched.nGsyscallNoP.Add(1)
4713 pp.gcStopTime = nanotime()
4714 pp.syscalltick++
4715 if sched.stopwait--; sched.stopwait == 0 {
4716 notewakeup(&sched.stopnote)
4717 }
4718 }
4719 unlock(&sched.lock)
4720 }
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
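// entersyscallblock is the same as entersyscall, but with a hint that the
// syscall is blocking: the P is handed off immediately instead of waiting
// for sysmon to retake it.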
4734 func entersyscallblock() {
4735 gp := getg()
4736
4737 gp.m.locks++
4738 gp.throwsplit = true
4739 gp.stackguard0 = stackPreempt
4740 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4741 gp.m.p.ptr().syscalltick++
4742
4743 sched.nGsyscallNoP.Add(1)
4744
4745
4746 pc := sys.GetCallerPC()
4747 sp := sys.GetCallerSP()
4748 bp := getcallerfp()
4749 save(pc, sp, bp)
4750 gp.syscallsp = gp.sched.sp
4751 gp.syscallpc = gp.sched.pc
4752 gp.syscallbp = gp.sched.bp
4753 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4754 sp1 := sp
4755 sp2 := gp.sched.sp
4756 sp3 := gp.syscallsp
4757 systemstack(func() {
4758 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4759 throw("entersyscallblock")
4760 })
4761 }
4762
4763
4764
4765
4766
4767
4768 trace := traceAcquire()
4769 systemstack(func() {
4770 if trace.ok() {
4771 trace.GoSysCall()
4772 }
4773 handoffp(releasep())
4774 })
4775
4776
4777
4778 if debugExtendGrunningNoP {
4779 usleep(10)
4780 }
4781 casgstatus(gp, _Grunning, _Gsyscall)
4782 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4783 systemstack(func() {
4784 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4785 throw("entersyscallblock")
4786 })
4787 }
4788 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4789 systemstack(func() {
4790 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4791 throw("entersyscallblock")
4792 })
4793 }
4794 if trace.ok() {
4795 systemstack(func() {
4796 traceRelease(trace)
4797 })
4798 }
4799
4800
4801 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4802
4803 gp.m.locks--
4804 }
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
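// exitsyscall is called when the goroutine g exited its system call;
// it arranges for g to run on a CPU again.
// This is called only from the go syscall library, not from the
// low-level system calls used by the runtime.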
4826 func exitsyscall() {
4827 gp := getg()
4828
4829 gp.m.locks++
4830 if sys.GetCallerSP() > gp.syscallsp {
4831 throw("exitsyscall: syscall frame is no longer valid")
4832 }
4833 gp.waitsince = 0
4834
4835 if sched.stopwait == freezeStopWait {
4836
4837
4838
4839 systemstack(func() {
4840 lock(&deadlock)
4841 lock(&deadlock)
4842 })
4843 }
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4857 casgstatus(gp, _Gsyscall, _Grunning)
4858 }
4859
4860
4861
4862
4863 if debugExtendGrunningNoP {
4864 usleep(10)
4865 }
4866
4867
4868 oldp := gp.m.oldp.ptr()
4869 gp.m.oldp.set(nil)
4870
4871
4872 pp := gp.m.p.ptr()
4873 if pp != nil {
4874
4875 if trace := traceAcquire(); trace.ok() {
4876 systemstack(func() {
4877
4878
4879
4880
4881
4882
4883
4884
4885 if pp.syscalltick == gp.m.syscalltick {
4886 trace.GoSysExit(false)
4887 } else {
4888
4889
4890
4891
4892 trace.ProcSteal(pp)
4893 trace.ProcStart()
4894 trace.GoSysExit(true)
4895 trace.GoStart()
4896 }
4897 traceRelease(trace)
4898 })
4899 }
4900 } else {
4901
4902 systemstack(func() {
4903
4904 if pp := exitsyscallTryGetP(oldp); pp != nil {
4905
4906 acquirepNoTrace(pp)
4907
4908
4909 if trace := traceAcquire(); trace.ok() {
4910 trace.ProcStart()
4911 trace.GoSysExit(true)
4912 trace.GoStart()
4913 traceRelease(trace)
4914 }
4915 }
4916 })
4917 pp = gp.m.p.ptr()
4918 }
4919
4920
4921 if pp != nil {
4922 if goroutineProfile.active {
4923
4924
4925
4926 systemstack(func() {
4927 tryRecordGoroutineProfileWB(gp)
4928 })
4929 }
4930
4931
4932 pp.syscalltick++
4933
4934
4935
4936 gp.syscallsp = 0
4937 gp.m.locks--
4938 if gp.preempt {
4939
4940 gp.stackguard0 = stackPreempt
4941 } else {
4942
4943 gp.stackguard0 = gp.stack.lo + stackGuard
4944 }
4945 gp.throwsplit = false
4946
4947 if sched.disable.user && !schedEnabled(gp) {
4948
4949 Gosched()
4950 }
4951 return
4952 }
4953
4954 gp.m.locks--
4955
4956
4957 mcall(exitsyscallNoP)
4958
4959
4960
4961
4962
4963
4964
4965 gp.syscallsp = 0
4966 gp.m.p.ptr().syscalltick++
4967 gp.throwsplit = false
4968 }
4969
4970
4971
4972
4973
4974
4975
4976 func exitsyscallTryGetP(oldp *p) *p {
4977
4978 if oldp != nil {
4979 if thread, ok := setBlockOnExitSyscall(oldp); ok {
4980 thread.takeP()
4981 thread.resume()
4982 sched.nGsyscallNoP.Add(-1)
4983 return oldp
4984 }
4985 }
4986
4987
4988 if sched.pidle != 0 {
4989 lock(&sched.lock)
4990 pp, _ := pidleget(0)
4991 if pp != nil && sched.sysmonwait.Load() {
4992 sched.sysmonwait.Store(false)
4993 notewakeup(&sched.sysmonnote)
4994 }
4995 unlock(&sched.lock)
4996 if pp != nil {
4997 sched.nGsyscallNoP.Add(-1)
4998 return pp
4999 }
5000 }
5001 return nil
5002 }
5003
5004
5005
5006
5007
5008
5009
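// exitsyscallNoP is the slow path of exitsyscall, run via mcall when the
// M could not reacquire a P directly: the goroutine is made runnable and
// either executed on a newly acquired idle P or put on the global run
// queue, after which this M parks.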
5010 func exitsyscallNoP(gp *g) {
5011 traceExitingSyscall()
5012 trace := traceAcquire()
5013 casgstatus(gp, _Grunning, _Grunnable)
5014 traceExitedSyscall()
5015 if trace.ok() {
5016
5017
5018
5019
5020 trace.GoSysExit(true)
5021 traceRelease(trace)
5022 }
5023 sched.nGsyscallNoP.Add(-1)
5024 dropg()
5025 lock(&sched.lock)
5026 var pp *p
5027 if schedEnabled(gp) {
5028 pp, _ = pidleget(0)
5029 }
5030 var locked bool
5031 if pp == nil {
5032 globrunqput(gp)
5033
5034
5035
5036
5037
5038
5039 locked = gp.lockedm != 0
5040 } else if sched.sysmonwait.Load() {
5041 sched.sysmonwait.Store(false)
5042 notewakeup(&sched.sysmonnote)
5043 }
5044 unlock(&sched.lock)
5045 if pp != nil {
5046 acquirep(pp)
5047 execute(gp, false)
5048 }
5049 if locked {
5050
5051
5052
5053
5054 stoplockedm()
5055 execute(gp, false)
5056 }
5057 stopm()
5058 schedule()
5059 }
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073 func syscall_runtime_BeforeFork() {
5074 gp := getg().m.curg
5075
5076
5077
5078
5079 gp.m.locks++
5080 sigsave(&gp.m.sigmask)
5081 sigblock(false)
5082
5083
5084
5085
5086
5087 gp.stackguard0 = stackFork
5088 }
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102 func syscall_runtime_AfterFork() {
5103 gp := getg().m.curg
5104
5105
5106 gp.stackguard0 = gp.stack.lo + stackGuard
5107
5108 msigrestore(gp.m.sigmask)
5109
5110 gp.m.locks--
5111 }
5112
5113
5114
5115 var inForkedChild bool
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136 func syscall_runtime_AfterForkInChild() {
5137
5138
5139
5140
5141 inForkedChild = true
5142
5143 clearSignalHandlers()
5144
5145
5146
5147 msigrestore(getg().m.sigmask)
5148
5149 inForkedChild = false
5150 }
5151
5152
5153
5154
5155 var pendingPreemptSignals atomic.Int32
5156
5157
5158
5159
5160 func syscall_runtime_BeforeExec() {
5161
5162 execLock.lock()
5163
5164
5165
5166 if GOOS == "darwin" || GOOS == "ios" {
5167 for pendingPreemptSignals.Load() > 0 {
5168 osyield()
5169 }
5170 }
5171 }
5172
5173
5174
5175
5176 func syscall_runtime_AfterExec() {
5177 execLock.unlock()
5178 }
5179
5180
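// malg allocates a new g with a stack big enough for stacksize bytes.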
5181 func malg(stacksize int32) *g {
5182 newg := new(g)
5183 if stacksize >= 0 {
5184 stacksize = round2(stackSystem + stacksize)
5185 systemstack(func() {
5186 newg.stack = stackalloc(uint32(stacksize))
5187 if valgrindenabled {
5188 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5189 }
5190 })
5191 newg.stackguard0 = newg.stack.lo + stackGuard
5192 newg.stackguard1 = ^uintptr(0)
5193
5194
5195 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5196 }
5197 return newg
5198 }
5199
5200
5201
5202
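// newproc creates a new g running fn and puts it on the queue of g's
// waiting to run. The compiler turns a go statement into a call to this.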
5203 func newproc(fn *funcval) {
5204 gp := getg()
5205 pc := sys.GetCallerPC()
5206 systemstack(func() {
5207 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5208
5209 pp := getg().m.p.ptr()
5210 runqput(pp, newg, true)
5211
5212 if mainStarted {
5213 wakep()
5214 }
5215 })
5216 }
5217
5218
5219
5220
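// newproc1 creates a new g in state _Grunnable (or _Gwaiting if parked is
// true), starting at fn. callerpc is the address of the go statement that
// created this. The caller is responsible for adding the new g to the
// scheduler.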
5221 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5222 if fn == nil {
5223 fatal("go of nil func value")
5224 }
5225
5226 mp := acquirem()
5227 pp := mp.p.ptr()
5228 newg := gfget(pp)
5229 if newg == nil {
5230 newg = malg(stackMin)
5231 casgstatus(newg, _Gidle, _Gdead)
5232 allgadd(newg)
5233 }
5234 if newg.stack.hi == 0 {
5235 throw("newproc1: newg missing stack")
5236 }
5237
5238 if readgstatus(newg) != _Gdead {
5239 throw("newproc1: new g is not Gdead")
5240 }
5241
5242 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5243 totalSize = alignUp(totalSize, sys.StackAlign)
5244 sp := newg.stack.hi - totalSize
5245 if usesLR {
5246
5247 *(*uintptr)(unsafe.Pointer(sp)) = 0
5248 prepGoExitFrame(sp)
5249 }
5250 if GOARCH == "arm64" {
5251
5252 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5253 }
5254
5255 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5256 newg.sched.sp = sp
5257 newg.stktopsp = sp
5258 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5259 newg.sched.g = guintptr(unsafe.Pointer(newg))
5260 gostartcallfn(&newg.sched, fn)
5261 newg.parentGoid = callergp.goid
5262 newg.gopc = callerpc
5263 newg.ancestors = saveAncestors(callergp)
5264 newg.startpc = fn.fn
5265 newg.runningCleanups.Store(false)
5266 if isSystemGoroutine(newg, false) {
5267 sched.ngsys.Add(1)
5268 } else {
5269
5270 newg.bubble = callergp.bubble
5271 if mp.curg != nil {
5272 newg.labels = mp.curg.labels
5273 }
5274 if goroutineProfile.active {
5275
5276
5277
5278
5279
5280 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5281 }
5282 }
5283
5284 newg.trackingSeq = uint8(cheaprand())
5285 if newg.trackingSeq%gTrackingPeriod == 0 {
5286 newg.tracking = true
5287 }
5288 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5289
5290
5291
5292 trace := traceAcquire()
5293 var status uint32 = _Grunnable
5294 if parked {
5295 status = _Gwaiting
5296 newg.waitreason = waitreason
5297 }
5298 if pp.goidcache == pp.goidcacheend {
5299
5300
5301
5302 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5303 pp.goidcache -= _GoidCacheBatch - 1
5304 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5305 }
5306 newg.goid = pp.goidcache
5307 casgstatus(newg, _Gdead, status)
5308 pp.goidcache++
5309 newg.trace.reset()
5310 if trace.ok() {
5311 trace.GoCreate(newg, newg.startpc, parked)
5312 traceRelease(trace)
5313 }
5314
5315
5316 if raceenabled {
5317 newg.racectx = racegostart(callerpc)
5318 newg.raceignore = 0
5319 if newg.labels != nil {
5320
5321
5322 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5323 }
5324 }
5325 pp.goroutinesCreated++
5326 releasem(mp)
5327
5328 return newg
5329 }
5330
5331
5332
5333
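// saveAncestors copies previous ancestors of the given caller g and
// includes info for the current caller into a new set of tracebacks for
// a g being created.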
5334 func saveAncestors(callergp *g) *[]ancestorInfo {
5335
5336 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5337 return nil
5338 }
5339 var callerAncestors []ancestorInfo
5340 if callergp.ancestors != nil {
5341 callerAncestors = *callergp.ancestors
5342 }
5343 n := int32(len(callerAncestors)) + 1
5344 if n > debug.tracebackancestors {
5345 n = debug.tracebackancestors
5346 }
5347 ancestors := make([]ancestorInfo, n)
5348 copy(ancestors[1:], callerAncestors)
5349
5350 var pcs [tracebackInnerFrames]uintptr
5351 npcs := gcallers(callergp, 0, pcs[:])
5352 ipcs := make([]uintptr, npcs)
5353 copy(ipcs, pcs[:])
5354 ancestors[0] = ancestorInfo{
5355 pcs: ipcs,
5356 goid: callergp.goid,
5357 gopc: callergp.gopc,
5358 }
5359
5360 ancestorsp := new([]ancestorInfo)
5361 *ancestorsp = ancestors
5362 return ancestorsp
5363 }
5364
5365
5366
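// gfput puts gp on the per-P free list.
// If the local list grows too long, a batch is transferred to the global list.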
5367 func gfput(pp *p, gp *g) {
5368 if readgstatus(gp) != _Gdead {
5369 throw("gfput: bad status (not Gdead)")
5370 }
5371
5372 stksize := gp.stack.hi - gp.stack.lo
5373
5374 if stksize != uintptr(startingStackSize) {
5375
5376 stackfree(gp.stack)
5377 gp.stack.lo = 0
5378 gp.stack.hi = 0
5379 gp.stackguard0 = 0
5380 if valgrindenabled {
5381 valgrindDeregisterStack(gp.valgrindStackID)
5382 gp.valgrindStackID = 0
5383 }
5384 }
5385
5386 pp.gFree.push(gp)
5387 if pp.gFree.size >= 64 {
5388 var (
5389 stackQ gQueue
5390 noStackQ gQueue
5391 )
5392 for pp.gFree.size >= 32 {
5393 gp := pp.gFree.pop()
5394 if gp.stack.lo == 0 {
5395 noStackQ.push(gp)
5396 } else {
5397 stackQ.push(gp)
5398 }
5399 }
5400 lock(&sched.gFree.lock)
5401 sched.gFree.noStack.pushAll(noStackQ)
5402 sched.gFree.stack.pushAll(stackQ)
5403 unlock(&sched.gFree.lock)
5404 }
5405 }
5406
5407
5408
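// gfget gets a g from the per-P free list.
// If the local list is empty, it grabs a batch from the global list.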
5409 func gfget(pp *p) *g {
5410 retry:
5411 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5412 lock(&sched.gFree.lock)
5413
5414 for pp.gFree.size < 32 {
5415
5416 gp := sched.gFree.stack.pop()
5417 if gp == nil {
5418 gp = sched.gFree.noStack.pop()
5419 if gp == nil {
5420 break
5421 }
5422 }
5423 pp.gFree.push(gp)
5424 }
5425 unlock(&sched.gFree.lock)
5426 goto retry
5427 }
5428 gp := pp.gFree.pop()
5429 if gp == nil {
5430 return nil
5431 }
5432 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5433
5434
5435
5436 systemstack(func() {
5437 stackfree(gp.stack)
5438 gp.stack.lo = 0
5439 gp.stack.hi = 0
5440 gp.stackguard0 = 0
5441 if valgrindenabled {
5442 valgrindDeregisterStack(gp.valgrindStackID)
5443 gp.valgrindStackID = 0
5444 }
5445 })
5446 }
5447 if gp.stack.lo == 0 {
5448
5449 systemstack(func() {
5450 gp.stack = stackalloc(startingStackSize)
5451 if valgrindenabled {
5452 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5453 }
5454 })
5455 gp.stackguard0 = gp.stack.lo + stackGuard
5456 } else {
5457 if raceenabled {
5458 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5459 }
5460 if msanenabled {
5461 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5462 }
5463 if asanenabled {
5464 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5465 }
5466 }
5467 return gp
5468 }
5469
5470
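// gfpurge purges all cached G's from pp's free list to the global list.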
5471 func gfpurge(pp *p) {
5472 var (
5473 stackQ gQueue
5474 noStackQ gQueue
5475 )
5476 for !pp.gFree.empty() {
5477 gp := pp.gFree.pop()
5478 if gp.stack.lo == 0 {
5479 noStackQ.push(gp)
5480 } else {
5481 stackQ.push(gp)
5482 }
5483 }
5484 lock(&sched.gFree.lock)
5485 sched.gFree.noStack.pushAll(noStackQ)
5486 sched.gFree.stack.pushAll(stackQ)
5487 unlock(&sched.gFree.lock)
5488 }
5489
5490
5491 func Breakpoint() {
5492 breakpoint()
5493 }
5494
5495
5496
5497
5498
5499
5500 func dolockOSThread() {
5501 if GOARCH == "wasm" {
5502 return
5503 }
5504 gp := getg()
5505 gp.m.lockedg.set(gp)
5506 gp.lockedm.set(gp.m)
5507 }
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
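// LockOSThread wires the calling goroutine to its current operating system
// thread. The calling goroutine will always execute in that thread, and no
// other goroutine will execute in it, until the calling goroutine has made
// as many calls to UnlockOSThread as to LockOSThread. If the calling
// goroutine exits without unlocking the thread, the thread is terminated.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state. A typical
// usage sketch:
//
//	go func() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		// ... thread-affine work ...
//	}()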
5525 func LockOSThread() {
5526 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5527
5528
5529
5530 startTemplateThread()
5531 }
5532 gp := getg()
5533 gp.m.lockedExt++
5534 if gp.m.lockedExt == 0 {
5535 gp.m.lockedExt--
5536 panic("LockOSThread nesting overflow")
5537 }
5538 dolockOSThread()
5539 }
5540
5541
5542 func lockOSThread() {
5543 getg().m.lockedInt++
5544 dolockOSThread()
5545 }
5546
5547
5548
5549
5550
5551
5552 func dounlockOSThread() {
5553 if GOARCH == "wasm" {
5554 return
5555 }
5556 gp := getg()
5557 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5558 return
5559 }
5560 gp.m.lockedg = 0
5561 gp.lockedm = 0
5562 }
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575
5576
5577
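// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the calling
// goroutine to zero, it unwires the calling goroutine from its fixed
// operating system thread. It is a no-op if there are no active calls.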
5578 func UnlockOSThread() {
5579 gp := getg()
5580 if gp.m.lockedExt == 0 {
5581 return
5582 }
5583 gp.m.lockedExt--
5584 dounlockOSThread()
5585 }
5586
5587
5588 func unlockOSThread() {
5589 gp := getg()
5590 if gp.m.lockedInt == 0 {
5591 systemstack(badunlockosthread)
5592 }
5593 gp.m.lockedInt--
5594 dounlockOSThread()
5595 }
5596
5597 func badunlockosthread() {
5598 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5599 }
5600
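// gcount returns the number of live goroutines: everything in allg minus
// the free Gs cached globally and on each P, and optionally minus system
// goroutines.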
5601 func gcount(includeSys bool) int32 {
5602 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5603 if !includeSys {
5604 n -= sched.ngsys.Load()
5605 }
5606 for _, pp := range allp {
5607 n -= pp.gFree.size
5608 }
5609
5610
5611
5612 if n < 1 {
5613 n = 1
5614 }
5615 return n
5616 }
5617
5618
5619
5620
5621
5622 func goroutineleakcount() int {
5623 return work.goroutineLeak.count
5624 }
5625
5626 func mcount() int32 {
5627 return int32(sched.mnext - sched.nmfreed)
5628 }
5629
5630 var prof struct {
5631 signalLock atomic.Uint32
5632
5633
5634
5635 hz atomic.Int32
5636 }
5637
5638 func _System() { _System() }
5639 func _ExternalCode() { _ExternalCode() }
5640 func _LostExternalCode() { _LostExternalCode() }
5641 func _GC() { _GC() }
5642 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5643 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5644 func _VDSO() { _VDSO() }
5645
5646
5647
5648
5649
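// sigprof is called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.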
5650 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5651 if prof.hz.Load() == 0 {
5652 return
5653 }
5654
5655
5656
5657
5658 if mp != nil && mp.profilehz == 0 {
5659 return
5660 }
5661
5662
5663
5664
5665
5666
5667
5668 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5669 if f := findfunc(pc); f.valid() {
5670 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5671 cpuprof.lostAtomic++
5672 return
5673 }
5674 }
5675 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5676
5677
5678
5679 cpuprof.lostAtomic++
5680 return
5681 }
5682 }
5683
5684
5685
5686
5687
5688
5689
5690 getg().m.mallocing++
5691
5692 var u unwinder
5693 var stk [maxCPUProfStack]uintptr
5694 n := 0
5695 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5696 cgoOff := 0
5697
5698
5699
5700
5701
5702 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5703 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5704 cgoOff++
5705 }
5706 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5707 mp.cgoCallers[0] = 0
5708 }
5709
5710
5711 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5712 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5713
5714
5715 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5716 } else if mp != nil && mp.vdsoSP != 0 {
5717
5718
5719 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5720 } else {
5721 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5722 }
5723 n += tracebackPCs(&u, 0, stk[n:])
5724
5725 if n <= 0 {
5726
5727
5728 n = 2
5729 if inVDSOPage(pc) {
5730 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5731 } else if pc > firstmoduledata.etext {
5732
5733 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5734 }
5735 stk[0] = pc
5736 if mp.preemptoff != "" {
5737 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5738 } else {
5739 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5740 }
5741 }
5742
5743 if prof.hz.Load() != 0 {
5744
5745
5746
5747 var tagPtr *unsafe.Pointer
5748 if gp != nil && gp.m != nil && gp.m.curg != nil {
5749 tagPtr = &gp.m.curg.labels
5750 }
5751 cpuprof.add(tagPtr, stk[:n])
5752
5753 gprof := gp
5754 var mp *m
5755 var pp *p
5756 if gp != nil && gp.m != nil {
5757 if gp.m.curg != nil {
5758 gprof = gp.m.curg
5759 }
5760 mp = gp.m
5761 pp = gp.m.p.ptr()
5762 }
5763 traceCPUSample(gprof, mp, pp, stk[:n])
5764 }
5765 getg().m.mallocing--
5766 }
5767
5768
5769
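// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, setcpuprofilerate turns off CPU profiling.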
5770 func setcpuprofilerate(hz int32) {
5771
5772 if hz < 0 {
5773 hz = 0
5774 }
5775
5776
5777
5778 gp := getg()
5779 gp.m.locks++
5780
5781
5782
5783
5784 setThreadCPUProfiler(0)
5785
5786 for !prof.signalLock.CompareAndSwap(0, 1) {
5787 osyield()
5788 }
5789 if prof.hz.Load() != hz {
5790 setProcessCPUProfiler(hz)
5791 prof.hz.Store(hz)
5792 }
5793 prof.signalLock.Store(0)
5794
5795 lock(&sched.lock)
5796 sched.profilehz = hz
5797 unlock(&sched.lock)
5798
5799 if hz != 0 {
5800 setThreadCPUProfiler(hz)
5801 }
5802
5803 gp.m.locks--
5804 }
5805
5806
5807
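// init initializes pp, which may be a freshly allocated p or a
// previously destroyed p, and transitions it to status _Pgcstop.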
5808 func (pp *p) init(id int32) {
5809 pp.id = id
5810 pp.gcw.id = id
5811 pp.status = _Pgcstop
5812 pp.sudogcache = pp.sudogbuf[:0]
5813 pp.deferpool = pp.deferpoolbuf[:0]
5814 pp.wbBuf.reset()
5815 if pp.mcache == nil {
5816 if id == 0 {
5817 if mcache0 == nil {
5818 throw("missing mcache?")
5819 }
5820
5821
5822 pp.mcache = mcache0
5823 } else {
5824 pp.mcache = allocmcache()
5825 }
5826 }
5827 if raceenabled && pp.raceprocctx == 0 {
5828 if id == 0 {
5829 pp.raceprocctx = raceprocctx0
5830 raceprocctx0 = 0
5831 } else {
5832 pp.raceprocctx = raceproccreate()
5833 }
5834 }
5835 lockInit(&pp.timers.mu, lockRankTimers)
5836
5837
5838
5839 timerpMask.set(id)
5840
5841
5842 idlepMask.clear(id)
5843 }
5844
5845
5846
5847
5848
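// destroy releases all of the resources associated with pp and
// transitions it to status _Pdead.
//
// sched.lock must be held and the world must be stopped.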
5849 func (pp *p) destroy() {
5850 assertLockHeld(&sched.lock)
5851 assertWorldStopped()
5852
5853
5854 for pp.runqhead != pp.runqtail {
5855
5856 pp.runqtail--
5857 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5858
5859 globrunqputhead(gp)
5860 }
5861 if pp.runnext != 0 {
5862 globrunqputhead(pp.runnext.ptr())
5863 pp.runnext = 0
5864 }
5865
5866
5867 getg().m.p.ptr().timers.take(&pp.timers)
5868
5869
5870
5871 if phase := gcphase; phase != _GCoff {
5872 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5873 throw("P destroyed while GC is running")
5874 }
5875
5876 pp.gcw.spanq.destroy()
5877
5878 clear(pp.sudogbuf[:])
5879 pp.sudogcache = pp.sudogbuf[:0]
5880 pp.pinnerCache = nil
5881 clear(pp.deferpoolbuf[:])
5882 pp.deferpool = pp.deferpoolbuf[:0]
5883 systemstack(func() {
5884 for i := 0; i < pp.mspancache.len; i++ {
5885
5886 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5887 }
5888 pp.mspancache.len = 0
5889 lock(&mheap_.lock)
5890 pp.pcache.flush(&mheap_.pages)
5891 unlock(&mheap_.lock)
5892 })
5893 freemcache(pp.mcache)
5894 pp.mcache = nil
5895 gfpurge(pp)
5896 if raceenabled {
5897 if pp.timers.raceCtx != 0 {
5898
5899
5900
5901
5902
5903 mp := getg().m
5904 phold := mp.p.ptr()
5905 mp.p.set(pp)
5906
5907 racectxend(pp.timers.raceCtx)
5908 pp.timers.raceCtx = 0
5909
5910 mp.p.set(phold)
5911 }
5912 raceprocdestroy(pp.raceprocctx)
5913 pp.raceprocctx = 0
5914 }
5915 pp.gcAssistTime = 0
5916 gcCleanups.queued += pp.cleanupsQueued
5917 pp.cleanupsQueued = 0
5918 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5919 pp.goroutinesCreated = 0
5920 pp.xRegs.free()
5921 pp.status = _Pdead
5922 }
5923
5924
5925
5926
5927
5928
5929
5930
5931
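// procresize changes the number of processors.
//
// sched.lock must be held, and the world must be stopped.
//
// Returns the list of Ps with local work; they need to be scheduled by
// the caller.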
5932 func procresize(nprocs int32) *p {
5933 assertLockHeld(&sched.lock)
5934 assertWorldStopped()
5935
5936 old := gomaxprocs
5937 if old < 0 || nprocs <= 0 {
5938 throw("procresize: invalid arg")
5939 }
5940 trace := traceAcquire()
5941 if trace.ok() {
5942 trace.Gomaxprocs(nprocs)
5943 traceRelease(trace)
5944 }
5945
5946
5947 now := nanotime()
5948 if sched.procresizetime != 0 {
5949 sched.totaltime += int64(old) * (now - sched.procresizetime)
5950 }
5951 sched.procresizetime = now
5952
5953
5954 if nprocs > int32(len(allp)) {
5955
5956
5957 lock(&allpLock)
5958 if nprocs <= int32(cap(allp)) {
5959 allp = allp[:nprocs]
5960 } else {
5961 nallp := make([]*p, nprocs)
5962
5963
5964 copy(nallp, allp[:cap(allp)])
5965 allp = nallp
5966 }
5967
5968 idlepMask = idlepMask.resize(nprocs)
5969 timerpMask = timerpMask.resize(nprocs)
5970 work.spanqMask = work.spanqMask.resize(nprocs)
5971 unlock(&allpLock)
5972 }
5973
5974
5975 for i := old; i < nprocs; i++ {
5976 pp := allp[i]
5977 if pp == nil {
5978 pp = new(p)
5979 }
5980 pp.init(i)
5981 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5982 }
5983
5984 gp := getg()
5985 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5986
5987 gp.m.p.ptr().status = _Prunning
5988 gp.m.p.ptr().mcache.prepareForSweep()
5989 } else {
5990
5991
5992
5993
5994
5995 if gp.m.p != 0 {
5996 trace := traceAcquire()
5997 if trace.ok() {
5998
5999
6000
6001 trace.GoSched()
6002 trace.ProcStop(gp.m.p.ptr())
6003 traceRelease(trace)
6004 }
6005 gp.m.p.ptr().m = 0
6006 }
6007 gp.m.p = 0
6008 pp := allp[0]
6009 pp.m = 0
6010 pp.status = _Pidle
6011 acquirep(pp)
6012 trace := traceAcquire()
6013 if trace.ok() {
6014 trace.GoStart()
6015 traceRelease(trace)
6016 }
6017 }
6018
6019
6020 mcache0 = nil
6021
6022
6023 for i := nprocs; i < old; i++ {
6024 pp := allp[i]
6025 pp.destroy()
6026
6027 }
6028
6029
6030 if int32(len(allp)) != nprocs {
6031 lock(&allpLock)
6032 allp = allp[:nprocs]
6033 idlepMask = idlepMask.resize(nprocs)
6034 timerpMask = timerpMask.resize(nprocs)
6035 work.spanqMask = work.spanqMask.resize(nprocs)
6036 unlock(&allpLock)
6037 }
6038
6039 var runnablePs *p
6040 var runnablePsNeedM *p
6041 for i := nprocs - 1; i >= 0; i-- {
6042 pp := allp[i]
6043 if gp.m.p.ptr() == pp {
6044 continue
6045 }
6046 pp.status = _Pidle
6047 if runqempty(pp) {
6048 pidleput(pp, now)
6049 continue
6050 }
6051
6052
6053
6054
6055
6056
6057
6058
6059 var mp *m
6060 if oldm := pp.oldm.get(); oldm != nil {
6061
6062 mp = mgetSpecific(oldm)
6063 }
6064 if mp == nil {
6065
6066 pp.link.set(runnablePsNeedM)
6067 runnablePsNeedM = pp
6068 continue
6069 }
6070 pp.m.set(mp)
6071 pp.link.set(runnablePs)
6072 runnablePs = pp
6073 }
6074 for runnablePsNeedM != nil {
6075 pp := runnablePsNeedM
6076 runnablePsNeedM = pp.link.ptr()
6077
6078 mp := mget()
6079 pp.m.set(mp)
6080 pp.link.set(runnablePs)
6081 runnablePs = pp
6082 }
6083
6084 stealOrder.reset(uint32(nprocs))
6085 var int32p *int32 = &gomaxprocs
6086 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6087 if old != nprocs {
6088
6089 gcCPULimiter.resetCapacity(now, nprocs)
6090 }
6091 return runnablePs
6092 }
6093
6094
6095
6096
6097
6098
6099
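// acquirep associates p and the current m and emits a ProcStart trace event.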
6100 func acquirep(pp *p) {
6101
6102 acquirepNoTrace(pp)
6103
6104
6105 trace := traceAcquire()
6106 if trace.ok() {
6107 trace.ProcStart()
6108 traceRelease(trace)
6109 }
6110 }
6111
6112
6113
6114
6115 func acquirepNoTrace(pp *p) {
6116
6117 wirep(pp)
6118
6119
6120
6121
6122
6123
6124 pp.oldm = pp.m.ptr().self
6125
6126
6127
6128 pp.mcache.prepareForSweep()
6129 }
6130
6131
6132
6133
6134
6135
6136
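// wirep is the first step of acquirep, which actually associates the
// current M to pp. This is broken out so we can disallow write barriers
// for this part, since we don't yet have a P.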
6137 func wirep(pp *p) {
6138 gp := getg()
6139
6140 if gp.m.p != 0 {
6141
6142
6143 systemstack(func() {
6144 throw("wirep: already in go")
6145 })
6146 }
6147 if pp.m != 0 || pp.status != _Pidle {
6148
6149
6150 systemstack(func() {
6151 id := int64(0)
6152 if pp.m != 0 {
6153 id = pp.m.ptr().id
6154 }
6155 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6156 throw("wirep: invalid p state")
6157 })
6158 }
6159 gp.m.p.set(pp)
6160 pp.m.set(gp.m)
6161 pp.status = _Prunning
6162 }
6163
6164
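// releasep disassociates p and the current m.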
6165 func releasep() *p {
6166 trace := traceAcquire()
6167 if trace.ok() {
6168 trace.ProcStop(getg().m.p.ptr())
6169 traceRelease(trace)
6170 }
6171 return releasepNoTrace()
6172 }
6173
6174
6175 func releasepNoTrace() *p {
6176 gp := getg()
6177
6178 if gp.m.p == 0 {
6179 throw("releasep: invalid arg")
6180 }
6181 pp := gp.m.p.ptr()
6182 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6183 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6184 throw("releasep: invalid p state")
6185 }
6186 gp.m.p = 0
6187 pp.m = 0
6188 pp.status = _Pidle
6189 return pp
6190 }
6191
6192 func incidlelocked(v int32) {
6193 lock(&sched.lock)
6194 sched.nmidlelocked += v
6195 if v > 0 {
6196 checkdead()
6197 }
6198 unlock(&sched.lock)
6199 }
6200
6201
6202
6203
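// checkdead checks for a deadlock situation.
// The check is based on the number of running M's: if it is 0, then all
// goroutines are asleep and the program is deadlocked.
// sched.lock must be held.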
6204 func checkdead() {
6205 assertLockHeld(&sched.lock)
6206
6207
6208
6209
6210
6211
6212 if (islibrary || isarchive) && GOARCH != "wasm" {
6213 return
6214 }
6215
6216
6217
6218
6219
6220 if panicking.Load() > 0 {
6221 return
6222 }
6223
6224
6225
6226
6227
6228 var run0 int32
6229 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6230 run0 = 1
6231 }
6232
6233 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6234 if run > run0 {
6235 return
6236 }
6237 if run < 0 {
6238 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6239 unlock(&sched.lock)
6240 throw("checkdead: inconsistent counts")
6241 }
6242
6243 grunning := 0
6244 forEachG(func(gp *g) {
6245 if isSystemGoroutine(gp, false) {
6246 return
6247 }
6248 s := readgstatus(gp)
6249 switch s &^ _Gscan {
6250 case _Gwaiting,
6251 _Gpreempted:
6252 grunning++
6253 case _Grunnable,
6254 _Grunning,
6255 _Gsyscall:
6256 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6257 unlock(&sched.lock)
6258 throw("checkdead: runnable g")
6259 }
6260 })
6261 if grunning == 0 {
6262 unlock(&sched.lock)
6263 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6264 }
6265
6266
6267 if faketime != 0 {
6268 if when := timeSleepUntil(); when < maxWhen {
6269 faketime = when
6270
6271
6272 pp, _ := pidleget(faketime)
6273 if pp == nil {
6274
6275
6276 unlock(&sched.lock)
6277 throw("checkdead: no p for timer")
6278 }
6279 mp := mget()
6280 if mp == nil {
6281
6282
6283 unlock(&sched.lock)
6284 throw("checkdead: no m for timer")
6285 }
6286
6287
6288
6289 sched.nmspinning.Add(1)
6290 mp.spinning = true
6291 mp.nextp.set(pp)
6292 notewakeup(&mp.park)
6293 return
6294 }
6295 }
6296
6297
6298 for _, pp := range allp {
6299 if len(pp.timers.heap) > 0 {
6300 return
6301 }
6302 }
6303
6304 unlock(&sched.lock)
6305 fatal("all goroutines are asleep - deadlock!")
6306 }
6307
6308
6309
6310
6311
6312
6313 var forcegcperiod int64 = 2 * 60 * 1e9
6314
6315
6316
6317
6318 const haveSysmon = GOARCH != "wasm"
6319
6320
6321
6322
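// sysmon is the system monitor: it runs on a dedicated thread, retaking
// Ps blocked in syscalls, preempting long-running goroutines, polling the
// network when nobody else is, and kicking off forced GCs and scavenger
// wakeups. It always runs without a P, so write barriers are not allowed.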
6323 func sysmon() {
6324 lock(&sched.lock)
6325 sched.nmsys++
6326 checkdead()
6327 unlock(&sched.lock)
6328
6329 lastgomaxprocs := int64(0)
6330 lasttrace := int64(0)
6331 idle := 0
6332 delay := uint32(0)
6333
6334 for {
6335 if idle == 0 {
6336 delay = 20
6337 } else if idle > 50 {
6338 delay *= 2
6339 }
6340 if delay > 10*1000 {
6341 delay = 10 * 1000
6342 }
6343 usleep(delay)
6344
6345
6346
6347
6348
6349
6350
6351
6352
6353
6354
6355
6356
6357
6358
6359
6360 now := nanotime()
6361 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6362 lock(&sched.lock)
6363 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6364 syscallWake := false
6365 next := timeSleepUntil()
6366 if next > now {
6367 sched.sysmonwait.Store(true)
6368 unlock(&sched.lock)
6369
6370
6371 sleep := forcegcperiod / 2
6372 if next-now < sleep {
6373 sleep = next - now
6374 }
6375 shouldRelax := sleep >= osRelaxMinNS
6376 if shouldRelax {
6377 osRelax(true)
6378 }
6379 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6380 if shouldRelax {
6381 osRelax(false)
6382 }
6383 lock(&sched.lock)
6384 sched.sysmonwait.Store(false)
6385 noteclear(&sched.sysmonnote)
6386 }
6387 if syscallWake {
6388 idle = 0
6389 delay = 20
6390 }
6391 }
6392 unlock(&sched.lock)
6393 }
6394
6395 lock(&sched.sysmonlock)
6396
6397
6398 now = nanotime()
6399
6400
6401 if *cgo_yield != nil {
6402 asmcgocall(*cgo_yield, nil)
6403 }
6404
6405 lastpoll := sched.lastpoll.Load()
6406 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6407 sched.lastpoll.CompareAndSwap(lastpoll, now)
6408 list, delta := netpoll(0)
6409 if !list.empty() {
6410
6411
6412
6413
6414
6415
6416
6417 incidlelocked(-1)
6418 injectglist(&list)
6419 incidlelocked(1)
6420 netpollAdjustWaiters(delta)
6421 }
6422 }
6423
6424 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6425 sysmonUpdateGOMAXPROCS()
6426 lastgomaxprocs = now
6427 }
6428 if scavenger.sysmonWake.Load() != 0 {
6429
6430 scavenger.wake()
6431 }
6432
6433
6434 if retake(now) != 0 {
6435 idle = 0
6436 } else {
6437 idle++
6438 }
6439
6440 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6441 lock(&forcegc.lock)
6442 forcegc.idle.Store(false)
6443 var list gList
6444 list.push(forcegc.g)
6445 injectglist(&list)
6446 unlock(&forcegc.lock)
6447 }
6448 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6449 lasttrace = now
6450 schedtrace(debug.scheddetail > 0)
6451 }
6452 unlock(&sched.sysmonlock)
6453 }
6454 }
6455
6456 type sysmontick struct {
6457 schedtick uint32
6458 syscalltick uint32
6459 schedwhen int64
6460 syscallwhen int64
6461 }
6462
6463
6464
6465 const forcePreemptNS = 10 * 1000 * 1000
6466
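// retake preempts Ps that have run the same goroutine for longer than
// forcePreemptNS and retakes Ps whose goroutine is blocked in a system
// call when there is other work to do. It returns the number of Ps retaken.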
6467 func retake(now int64) uint32 {
6468 n := 0
6469
6470
6471 lock(&allpLock)
6472
6473
6474
6475 for i := 0; i < len(allp); i++ {
6476
6477
6478
6479
6480
6481
6482
6483
6484 pp := allp[i]
6485 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6486
6487
6488 continue
6489 }
6490 pd := &pp.sysmontick
6491 sysretake := false
6492
6493
6494
6495
6496
6497 schedt := int64(pp.schedtick)
6498 if int64(pd.schedtick) != schedt {
6499 pd.schedtick = uint32(schedt)
6500 pd.schedwhen = now
6501 } else if pd.schedwhen+forcePreemptNS <= now {
6502 preemptone(pp)
6503
6504
6505
6506
6507 sysretake = true
6508 }
6509
6510
6511 unlock(&allpLock)
6512
6513
6514
6515
6516
6517
6518
6519
6520 incidlelocked(-1)
6521
6522
6523 thread, ok := setBlockOnExitSyscall(pp)
6524 if !ok {
6525
6526 goto done
6527 }
6528
6529
6530 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6531 pd.syscalltick = uint32(syst)
6532 pd.syscallwhen = now
6533 thread.resume()
6534 goto done
6535 }
6536
6537
6538
6539
6540 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6541 thread.resume()
6542 goto done
6543 }
6544
6545
6546
6547 thread.takeP()
6548 thread.resume()
6549 n++
6550
6551
6552 handoffp(pp)
6553
6554
6555
6556 done:
6557 incidlelocked(1)
6558 lock(&allpLock)
6559 }
6560 unlock(&allpLock)
6561 return uint32(n)
6562 }
6563
6564
6565
6566 type syscallingThread struct {
6567 gp *g
6568 mp *m
6569 pp *p
6570 status uint32
6571 }
6572
6573
6574
6575
6576
6577
6578
6579
6580
6581
6582
6583
6584
6585
6586
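// setBlockOnExitSyscall attempts to take ownership of the goroutine that
// is in a system call on pp by setting its _Gscan bit, which makes
// exitsyscall block until resume is called. On success it returns a
// handle to the syscalling thread; the caller must eventually call
// resume, optionally taking the P first via takeP or gcstopP.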
6587 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6588 if pp.status != _Prunning {
6589 return syscallingThread{}, false
6590 }
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600
6601
6602 mp := pp.m.ptr()
6603 if mp == nil {
6604
6605 return syscallingThread{}, false
6606 }
6607 gp := mp.curg
6608 if gp == nil {
6609
6610 return syscallingThread{}, false
6611 }
6612 status := readgstatus(gp) &^ _Gscan
6613
6614
6615
6616
6617 if status != _Gsyscall && status != _Gdeadextra {
6618
6619 return syscallingThread{}, false
6620 }
6621 if !castogscanstatus(gp, status, status|_Gscan) {
6622
6623 return syscallingThread{}, false
6624 }
6625 if gp.m != mp || gp.m.p.ptr() != pp {
6626
6627 casfrom_Gscanstatus(gp, status|_Gscan, status)
6628 return syscallingThread{}, false
6629 }
6630 return syscallingThread{gp, mp, pp, status}, true
6631 }
6632
6633
6634
6635
6636
6637 func (s syscallingThread) gcstopP() {
6638 assertLockHeld(&sched.lock)
6639
6640 s.releaseP(_Pgcstop)
6641 s.pp.gcStopTime = nanotime()
6642 sched.stopwait--
6643 }
6644
6645
6646
6647 func (s syscallingThread) takeP() {
6648 s.releaseP(_Pidle)
6649 }
6650
6651
6652
6653
6654 func (s syscallingThread) releaseP(state uint32) {
6655 if state != _Pidle && state != _Pgcstop {
6656 throw("attempted to release P into a bad state")
6657 }
6658 trace := traceAcquire()
6659 s.pp.m = 0
6660 s.mp.p = 0
6661 atomic.Store(&s.pp.status, state)
6662 if trace.ok() {
6663 trace.ProcSteal(s.pp)
6664 traceRelease(trace)
6665 }
6666 sched.nGsyscallNoP.Add(1)
6667 s.pp.syscalltick++
6668 }
6669
6670
6671 func (s syscallingThread) resume() {
6672 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6673 }
6674
6675
6676
6677
6678
6679
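// preemptall tells all goroutines that they have been preempted and they
// should stop. This function is purely best-effort. It can fail to inform
// a goroutine if a processor just started running it. No locks need to be
// held. Returns true if a preemption request was issued to at least one
// goroutine.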
6680 func preemptall() bool {
6681 res := false
6682 for _, pp := range allp {
6683 if pp.status != _Prunning {
6684 continue
6685 }
6686 if preemptone(pp) {
6687 res = true
6688 }
6689 }
6690 return res
6691 }
6692
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702
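// preemptone tells the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform
// the goroutine, or inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request.
// No lock needs to be held. Returns true if a preemption request was issued.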
6703 func preemptone(pp *p) bool {
6704 mp := pp.m.ptr()
6705 if mp == nil || mp == getg().m {
6706 return false
6707 }
6708 gp := mp.curg
6709 if gp == nil || gp == mp.g0 {
6710 return false
6711 }
6712 if readgstatus(gp)&^_Gscan == _Gsyscall {
6713
6714 return false
6715 }
6716
6717 gp.preempt = true
6718
6719
6720
6721
6722
6723 gp.stackguard0 = stackPreempt
6724
6725
6726 if preemptMSupported && debug.asyncpreemptoff == 0 {
6727 pp.preempt = true
6728 preemptM(mp)
6729 }
6730
6731 return true
6732 }
6733
6734 var starttime int64
6735
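// schedtrace prints a one-line (or, when detailed, multi-line) summary of
// scheduler state; it is driven by the GODEBUG schedtrace and scheddetail
// settings.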
6736 func schedtrace(detailed bool) {
6737 now := nanotime()
6738 if starttime == 0 {
6739 starttime = now
6740 }
6741
6742 lock(&sched.lock)
6743 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6744 if detailed {
6745 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6746 }
6747
6748
6749
6750 for i, pp := range allp {
6751 h := atomic.Load(&pp.runqhead)
6752 t := atomic.Load(&pp.runqtail)
6753 if detailed {
6754 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6755 mp := pp.m.ptr()
6756 if mp != nil {
6757 print(mp.id)
6758 } else {
6759 print("nil")
6760 }
6761 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6762 } else {
6763
6764
6765 print(" ")
6766 if i == 0 {
6767 print("[ ")
6768 }
6769 print(t - h)
6770 if i == len(allp)-1 {
6771 print(" ]")
6772 }
6773 }
6774 }
6775
6776 if !detailed {
6777
6778 print(" schedticks=[ ")
6779 for _, pp := range allp {
6780 print(pp.schedtick)
6781 print(" ")
6782 }
6783 print("]\n")
6784 }
6785
6786 if !detailed {
6787 unlock(&sched.lock)
6788 return
6789 }
6790
6791 for mp := allm; mp != nil; mp = mp.alllink {
6792 pp := mp.p.ptr()
6793 print(" M", mp.id, ": p=")
6794 if pp != nil {
6795 print(pp.id)
6796 } else {
6797 print("nil")
6798 }
6799 print(" curg=")
6800 if mp.curg != nil {
6801 print(mp.curg.goid)
6802 } else {
6803 print("nil")
6804 }
6805 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6806 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6807 print(lockedg.goid)
6808 } else {
6809 print("nil")
6810 }
6811 print("\n")
6812 }
6813
6814 forEachG(func(gp *g) {
6815 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6816 if gp.m != nil {
6817 print(gp.m.id)
6818 } else {
6819 print("nil")
6820 }
6821 print(" lockedm=")
6822 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6823 print(lockedm.id)
6824 } else {
6825 print("nil")
6826 }
6827 print("\n")
6828 })
6829 unlock(&sched.lock)
6830 }
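// Illustrative only, not part of the source: values below are made up.
// When schedtrace is enabled (typically via GODEBUG=schedtrace=N, which has
// sysmon call this function roughly every N ms), the non-detailed output
// built above looks like, for a hypothetical 4-P program:
//
//	SCHED 2012ms: gomaxprocs=4 idleprocs=2 threads=7 spinningthreads=0 needspinning=0 idlethreads=3 runqueue=1 [ 0 2 0 0 ] schedticks=[ 17 42 9 3 ]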
6831 // updateMaxProcsGState is the state shared with the goroutine that applies automatic GOMAXPROCS updates.
6832 type updateMaxProcsGState struct {
6833 lock mutex
6834 g *g
6835 idle atomic.Bool
6836 // procs is the new GOMAXPROCS value to apply, written by
6837 // sysmonUpdateGOMAXPROCS before it wakes the goroutine.
6838 procs int32
6839 }
6840
6841 var (
6842 // updatemaxprocs counts use of the non-default
6843 // GODEBUG=updatemaxprocs setting.
6844 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6845
6846
6847 // updateMaxProcsG is the state for the GOMAXPROCS update goroutine.
6848 updateMaxProcsG updateMaxProcsGState
6849
6850
6851
6852
6853
6854
6855
6856
6857
6858
6859
6860
6861
6862
6863
6864
6865
6866
6867
6868
6869
6870
6871
6872
6873
6874
6875
6876
6877
6878
6879
6880
6881
6882
6883
6884
6885
6886
6887
6888
6889
6890
6891
6892
6893
6894 // computeMaxProcsLock serializes computation of the default
6895 // GOMAXPROCS value; sysmonUpdateGOMAXPROCS holds it around its
6896 // call to defaultGOMAXPROCS.
6897 computeMaxProcsLock mutex
6898 )
6899
6900
6901 // defaultGOMAXPROCSUpdateEnable starts the goroutine that keeps
6902 // GOMAXPROCS up to date, unless GODEBUG=updatemaxprocs=0 disables it.
6903 func defaultGOMAXPROCSUpdateEnable() {
6904 if debug.updatemaxprocs == 0 {
6905
6906
6907
6908
6909
6910
6911
6912
6913
6914 // Automatic updates are disabled via GODEBUG; record use of the
6915 // non-default setting and leave GOMAXPROCS as it is.
6916 updatemaxprocs.IncNonDefault()
6917 return
6918 }
6919
6920 go updateMaxProcsGoroutine()
6921 }
6922
6923 func updateMaxProcsGoroutine() {
6924 updateMaxProcsG.g = getg()
6925 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
6926 for {
6927 lock(&updateMaxProcsG.lock)
6928 if updateMaxProcsG.idle.Load() {
6929 throw("updateMaxProcsGoroutine: phase error")
6930 }
6931 updateMaxProcsG.idle.Store(true)
6932 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
6933
6934 // Woken by sysmon with a new target value in updateMaxProcsG.procs.
6935 stw := stopTheWorldGC(stwGOMAXPROCS)
6936
6937 // If GOMAXPROCS was set manually in the meantime, stop updating it.
6938 lock(&sched.lock)
6939 custom := sched.customGOMAXPROCS
6940 unlock(&sched.lock)
6941 if custom {
6942 startTheWorldGC(stw)
6943 return
6944 }
6945
6946
6947
6948 // newprocs is consumed when the world restarts, applying the
6949 // updated GOMAXPROCS value.
6950 newprocs = updateMaxProcsG.procs
6951 lock(&sched.lock)
6952 sched.customGOMAXPROCS = false
6953 unlock(&sched.lock)
6954
6955 startTheWorldGC(stw)
6956 }
6957 }
6958
6959 func sysmonUpdateGOMAXPROCS() {
6960
6961 lock(&computeMaxProcsLock)
6962
6963
6964 lock(&sched.lock)
6965 custom := sched.customGOMAXPROCS
6966 curr := gomaxprocs
6967 unlock(&sched.lock)
6968 if custom {
6969 unlock(&computeMaxProcsLock)
6970 return
6971 }
6972
6973 // Recompute what GOMAXPROCS would currently default to.
6974 procs := defaultGOMAXPROCS(0)
6975 unlock(&computeMaxProcsLock)
6976 if procs == curr {
6977 // No change; nothing to do.
6978 return
6979 }
6980
6981
6982 // If the update goroutine is idle, hand it the new value and make
6983 // it runnable; otherwise it is still applying a previous update.
6984 if updateMaxProcsG.idle.Load() {
6985 lock(&updateMaxProcsG.lock)
6986 updateMaxProcsG.procs = procs
6987 updateMaxProcsG.idle.Store(false)
6988 var list gList
6989 list.push(updateMaxProcsG.g)
6990 injectglist(&list)
6991 unlock(&updateMaxProcsG.lock)
6992 }
6993 }
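// A rough sketch of the automatic GOMAXPROCS update flow implemented by the
// two functions above (descriptive only, derived from the code here):
//
//	sysmonUpdateGOMAXPROCS                     updateMaxProcsGoroutine
//	  lock computeMaxProcsLock                   mark itself idle, park on
//	  procs = defaultGOMAXPROCS(0)               updateMaxProcsG.lock
//	  if procs != gomaxprocs and not custom:
//	    updateMaxProcsG.procs = procs
//	    idle = false; injectglist(g)  ------->   woken
//	                                             stopTheWorldGC(stwGOMAXPROCS)
//	                                             newprocs = procs (unless custom)
//	                                             startTheWorldGC, then park again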
6994
6995
6996 // schedEnableUser enables or disables the scheduling of user goroutines.
6997 //
6998 // While disabled, newly runnable user goroutines accumulate on
6999 // sched.disable.runnable; enabling flushes them to the global run queue.
7000 func schedEnableUser(enable bool) {
7001 lock(&sched.lock)
7002 if sched.disable.user == !enable {
7003 unlock(&sched.lock)
7004 return
7005 }
7006 sched.disable.user = !enable
7007 if enable {
7008 n := sched.disable.runnable.size
7009 globrunqputbatch(&sched.disable.runnable)
7010 unlock(&sched.lock)
7011 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7012 startm(nil, false, false)
7013 }
7014 } else {
7015 unlock(&sched.lock)
7016 }
7017 }
7018
7019
7020 // schedEnabled reports whether gp should be scheduled: when user
7021 // scheduling is disabled, only system goroutines may run.
7022 // sched.lock must be held.
7023 func schedEnabled(gp *g) bool {
7024 assertLockHeld(&sched.lock)
7025
7026 if sched.disable.user {
7027 return isSystemGoroutine(gp, true)
7028 }
7029 return true
7030 }
7031
7032
7033
7034 // mput puts mp on the idle M list.
7035 //
7036 // sched.lock must be held.
7037 func mput(mp *m) {
7038 assertLockHeld(&sched.lock)
7039
7040 sched.midle.push(unsafe.Pointer(mp))
7041 sched.nmidle++
7042 checkdead()
7043 }
7044
7045
7046
7047 // mget tries to take an M from the idle M list, returning nil if none.
7048 //
7049 // sched.lock must be held.
7050 func mget() *m {
7051 assertLockHeld(&sched.lock)
7052
7053 mp := (*m)(sched.midle.pop())
7054 if mp != nil {
7055 sched.nmidle--
7056 }
7057 return mp
7058 }
7059
7060
7061
7062
7063
7064 // mgetSpecific removes a specific M from the idle M list, returning
7065 // nil if that M is not currently on it.
7066 // sched.lock must be held.
7067 func mgetSpecific(mp *m) *m {
7068 assertLockHeld(&sched.lock)
7069
7070 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7071
7072 return nil
7073 }
7074
7075 sched.midle.remove(unsafe.Pointer(mp))
7076 sched.nmidle--
7077
7078 return mp
7079 }
7080
7081
7082
7083 // globrunqput puts gp on the tail of the global runnable queue.
7084 //
7085 // sched.lock must be held.
7086 func globrunqput(gp *g) {
7087 assertLockHeld(&sched.lock)
7088
7089 sched.runq.pushBack(gp)
7090 }
7091
7092
7093
7094 // globrunqputhead puts gp at the head of the global runnable queue.
7095 //
7096 // sched.lock must be held.
7097 func globrunqputhead(gp *g) {
7098 assertLockHeld(&sched.lock)
7099
7100 sched.runq.push(gp)
7101 }
7102
7103
7104
7105
7106 // globrunqputbatch appends a whole batch of goroutines to the tail of
7107 // the global runnable queue, leaving *batch empty.
7108 // sched.lock must be held.
7109 func globrunqputbatch(batch *gQueue) {
7110 assertLockHeld(&sched.lock)
7111
7112 sched.runq.pushBackAll(*batch)
7113 *batch = gQueue{}
7114 }
7115
7116 // globrunqget returns one g from the global runnable queue, or nil if empty.
7117 // sched.lock must be held.
7118 func globrunqget() *g {
7119 assertLockHeld(&sched.lock)
7120
7121 if sched.runq.size == 0 {
7122 return nil
7123 }
7124
7125 return sched.runq.pop()
7126 }
7127
7128 // globrunqgetbatch returns gp plus a batch of other goroutines from the
7129 // global runnable queue, capped at a fair per-P share. sched.lock must be held.
7130 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7131 assertLockHeld(&sched.lock)
7132
7133 if sched.runq.size == 0 {
7134 return
7135 }
7136
7137 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7138
7139 gp = sched.runq.pop()
7140 n--
7141
7142 for ; n > 0; n-- {
7143 gp1 := sched.runq.pop()
7144 q.pushBack(gp1)
7145 }
7146 return
7147 }
7148
7149 // pMask is an atomic bitmask with one bit per P.
7150 type pMask []uint32
7151
7152 // read returns true if P id's bit is set.
7153 func (p pMask) read(id uint32) bool {
7154 word := id / 32
7155 mask := uint32(1) << (id % 32)
7156 return (atomic.Load(&p[word]) & mask) != 0
7157 }
7158
7159 // set sets P id's bit.
7160 func (p pMask) set(id int32) {
7161 word := id / 32
7162 mask := uint32(1) << (id % 32)
7163 atomic.Or(&p[word], mask)
7164 }
7165
7166 // clear clears P id's bit.
7167 func (p pMask) clear(id int32) {
7168 word := id / 32
7169 mask := uint32(1) << (id % 32)
7170 atomic.And(&p[word], ^mask)
7171 }
7172
7173 // any returns true if any bit in the mask is set.
7174 func (p pMask) any() bool {
7175 for i := range p {
7176 if atomic.Load(&p[i]) != 0 {
7177 return true
7178 }
7179 }
7180 return false
7181 }
7182
7183
7184
7185 // resize returns a pMask with capacity for nprocs Ps, reusing the
7186 // existing backing store when possible and preserving current bits.
7187 func (p pMask) resize(nprocs int32) pMask {
7188 maskWords := (nprocs + 31) / 32
7189
7190 if maskWords <= int32(cap(p)) {
7191 return p[:maskWords]
7192 }
7193 newMask := make([]uint32, maskWords)
7194
7195 copy(newMask, p)
7196 return newMask
7197 }
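// Worked example (illustrative only): for id = 69, read/set/clear use
// word = 69/32 = 2 and mask = 1<<(69%32) = 1<<5, i.e. bit 5 of p[2].
// resize(70) needs (70+31)/32 = 3 words to cover P ids 0 through 69.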
7198
7199
7200
7201
7202
7203
7204
7205 // pidleput puts pp on the _Pidle list and updates the idle and timer
7206 // P masks. now is the current time, or 0 to have pidleput call
7207 // nanotime itself; the (possibly updated) now is returned.
7208 //
7209 // This releases ownership of pp. sched.lock must be held.
7210 func pidleput(pp *p, now int64) int64 {
7211 assertLockHeld(&sched.lock)
7212
7213 if !runqempty(pp) {
7214 throw("pidleput: P has non-empty run queue")
7215 }
7216 if now == 0 {
7217 now = nanotime()
7218 }
7219 if pp.timers.len.Load() == 0 {
7220 timerpMask.clear(pp.id)
7221 }
7222 idlepMask.set(pp.id)
7223 pp.link = sched.pidle
7224 sched.pidle.set(pp)
7225 sched.npidle.Add(1)
7226 if !pp.limiterEvent.start(limiterEventIdle, now) {
7227 throw("must be able to track idle limiter event")
7228 }
7229 return now
7230 }
7231
7232
7233
7234
7235 // pidleget tries to take a P off the _Pidle list, acquiring ownership
7236 // of it. now is the current time, or 0 to have pidleget call nanotime.
7237 //
7238 // sched.lock must be held.
7239 func pidleget(now int64) (*p, int64) {
7240 assertLockHeld(&sched.lock)
7241
7242 pp := sched.pidle.ptr()
7243 if pp != nil {
7244
7245 if now == 0 {
7246 now = nanotime()
7247 }
7248 timerpMask.set(pp.id)
7249 idlepMask.clear(pp.id)
7250 sched.pidle = pp.link
7251 sched.npidle.Add(-1)
7252 pp.limiterEvent.stop(limiterEventIdle, now)
7253 }
7254 return pp, now
7255 }
7256
7257
7258
7259
7260
7261
7262 // pidlegetSpinning is like pidleget, but for callers that are about to
7263 // spin looking for work. If no P is available it sets
7264 // sched.needspinning so that a P freed up later gets handed off.
7265 //
7266 // sched.lock must be held.
7267 func pidlegetSpinning(now int64) (*p, int64) {
7268 assertLockHeld(&sched.lock)
7269
7270 pp, now := pidleget(now)
7271 if pp == nil {
7272
7273
7274
7275 sched.needspinning.Store(1)
7276 return nil, now
7277 }
7278
7279 return pp, now
7280 }
7281
7282
7283 // runqempty reports whether pp has no Gs on its local run queue.
7284 func runqempty(pp *p) bool {
7285
7286 // It never returns true spuriously: runqtail is re-read to defend
7287 // against a racing runqput/runqget moving a G between runnext and
7288 // the ring buffer while we are looking.
7289 for {
7290 head := atomic.Load(&pp.runqhead)
7291 tail := atomic.Load(&pp.runqtail)
7292 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7293 if tail == atomic.Load(&pp.runqtail) {
7294 return head == tail && runnext == 0
7295 }
7296 }
7297 }
7298
7299
7300
7301
7302
7303
7304
7305 // randomizeScheduler enables extra randomness in scheduling decisions
7306 // when the race detector is on, to shake out latent assumptions about
7307 // scheduling order in the code under test.
7308 const randomizeScheduler = raceenabled
7309
7310 // runqput tries to put gp on the local runnable queue.
7311 // If next is false, runqput adds gp to the tail of the queue.
7312 // If next is true, runqput puts gp in the pp.runnext slot.
7313 // If the local queue is full, the overflow spills to the global queue.
7314 // Executed only by the owner P.
7315 func runqput(pp *p, gp *g, next bool) {
7316 if !haveSysmon && next {
7317
7318
7319
7320
7321 // A runnext goroutine shares the current time slice (inheritTime).
7322 // Without sysmon to preempt, two goroutines repeatedly handing off
7323 // to each other via runnext could starve the rest of the run queue,
7324 // so fall back to the ordinary tail of the queue.
7325 next = false
7326 }
7327 if randomizeScheduler && next && randn(2) == 0 {
7328 next = false
7329 }
7330
7331 if next {
7332 retryNext:
7333 oldnext := pp.runnext
7334 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7335 goto retryNext
7336 }
7337 if oldnext == 0 {
7338 return
7339 }
7340 // Kick the old runnext out to the regular run queue.
7341 gp = oldnext.ptr()
7342 }
7343
7344 retry:
7345 h := atomic.LoadAcq(&pp.runqhead)
7346 t := pp.runqtail
7347 if t-h < uint32(len(pp.runq)) {
7348 pp.runq[t%uint32(len(pp.runq))].set(gp)
7349 atomic.StoreRel(&pp.runqtail, t+1)
7350 return
7351 }
7352 if runqputslow(pp, gp, h, t) {
7353 return
7354 }
7355 // The queue is not full; the put above must eventually succeed.
7356 goto retry
7357 }
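// Note on the ring buffer used above (descriptive, derived from the code):
// runqhead and runqtail are free-running uint32 counters and slots are
// indexed modulo len(pp.runq), which is 256 (runqsteal below passes &pp.runq
// as the *[256]guintptr batch). So t-h is the queue length even across
// wraparound: for example h = 4294967290 and t = 10 give t-h == 16 entries,
// occupying slots 250..255 and 0..9.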
7358
7359 // runqputslow puts gp and a batch of work from the local runnable
7360 // queue on the global queue. Executed only by the owner P.
7361 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7362 var batch [len(pp.runq)/2 + 1]*g
7363
7364 // First, grab half of the local queue into batch.
7365 n := t - h
7366 n = n / 2
7367 if n != uint32(len(pp.runq)/2) {
7368 throw("runqputslow: queue is not full")
7369 }
7370 for i := uint32(0); i < n; i++ {
7371 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7372 }
7373 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7374 return false
7375 }
7376 batch[n] = gp
7377
7378 if randomizeScheduler {
7379 for i := uint32(1); i <= n; i++ {
7380 j := cheaprandn(i + 1)
7381 batch[i], batch[j] = batch[j], batch[i]
7382 }
7383 }
7384
7385 // Link the goroutines into a list via schedlink.
7386 for i := uint32(0); i < n; i++ {
7387 batch[i].schedlink.set(batch[i+1])
7388 }
7389
7390 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7391
7392 // Now put the batch on the global queue.
7393 lock(&sched.lock)
7394 globrunqputbatch(&q)
7395 unlock(&sched.lock)
7396 return true
7397 }
7398
7399
7400 // runqputbatch tries to put the Gs in q on the local runnable queue,
7401 // stopping when the local queue fills up. Executed only by the owner P.
7402 func runqputbatch(pp *p, q *gQueue) {
7403 if q.empty() {
7404 return
7405 }
7406 h := atomic.LoadAcq(&pp.runqhead)
7407 t := pp.runqtail
7408 n := uint32(0)
7409 for !q.empty() && t-h < uint32(len(pp.runq)) {
7410 gp := q.pop()
7411 pp.runq[t%uint32(len(pp.runq))].set(gp)
7412 t++
7413 n++
7414 }
7415
7416 if randomizeScheduler {
7417 off := func(o uint32) uint32 {
7418 return (pp.runqtail + o) % uint32(len(pp.runq))
7419 }
7420 for i := uint32(1); i < n; i++ {
7421 j := cheaprandn(i + 1)
7422 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7423 }
7424 }
7425
7426 atomic.StoreRel(&pp.runqtail, t)
7427
7428 return
7429 }
7430
7431
7432 // runqget gets a g from the local runnable queue. If inheritTime is
7433 // true, gp should inherit the remaining time in the current time
7434 // slice; otherwise it starts a new one. Executed only by the owner P.
7435 func runqget(pp *p) (gp *g, inheritTime bool) {
7436 // If there's a runnext, it's the next G to run.
7437 next := pp.runnext
7438
7439 // A CAS is needed because a stealing P may take runnext concurrently;
7440 // if the CAS fails, the slot was emptied by a thief and we fall through.
7441 if next != 0 && pp.runnext.cas(next, 0) {
7442 return next.ptr(), true
7443 }
7444
7445 for {
7446 h := atomic.LoadAcq(&pp.runqhead)
7447 t := pp.runqtail
7448 if t == h {
7449 return nil, false
7450 }
7451 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7452 if atomic.CasRel(&pp.runqhead, h, h+1) {
7453 return gp, false
7454 }
7455 }
7456 }
7457
7458 // runqdrain drains pp's local runnable queue (including runnext) and
7459 // returns all of its goroutines. Executed only by the owner P.
7460 func runqdrain(pp *p) (drainQ gQueue) {
7461 oldNext := pp.runnext
7462 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7463 drainQ.pushBack(oldNext.ptr())
7464 }
7465
7466 retry:
7467 h := atomic.LoadAcq(&pp.runqhead)
7468 t := pp.runqtail
7469 qn := t - h
7470 if qn == 0 {
7471 return
7472 }
7473 if qn > uint32(len(pp.runq)) {
7474 goto retry
7475 }
7476
7477 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7478 goto retry
7479 }
7480
7481
7482
7483
7484
7485 // Having advanced runqhead we own the claimed slots: only the owner P
7486 // (us) appends at the tail and stealers take from the new head, so the
7487 // Gs read below cannot be overwritten concurrently.
7488 for i := uint32(0); i < qn; i++ {
7489 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7490 drainQ.pushBack(gp)
7491 }
7492 return
7493 }
7494
7495
7496 // runqgrab grabs a batch of goroutines from pp's runnable queue into
7497 // batch, a ring buffer starting at batchHead, and returns the number
7498 // grabbed. Can be executed by any P.
7499 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7500 for {
7501 h := atomic.LoadAcq(&pp.runqhead)
7502 t := atomic.LoadAcq(&pp.runqtail)
7503 n := t - h
7504 n = n - n/2
7505 if n == 0 {
7506 if stealRunNextG {
7507 // Local queue is empty; try to steal pp.runnext instead.
7508 if next := pp.runnext; next != 0 {
7509 if pp.status == _Prunning {
7510 if mp := pp.m.ptr(); mp != nil {
7511 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
7512
7513
7514
7515
7516
7517
7518
7519
7520
7521
7522
7523
7524
7525
7526 // Sleep briefly to ensure that pp isn't about to run the g we
7527 // would steal. The important case is a g on pp readying another
7528 // g and then almost immediately blocking; backing off gives pp a
7529 // chance to schedule its runnext itself and avoids thrashing gs
7530 // between Ps.
7531 if !osHasLowResTimer {
7532 usleep(3)
7533 } else {
7534
7535 // The system timer is too coarse for a microsecond sleep here,
7536 // so just yield the OS thread instead.
7537 osyield()
7538 }
7539 }
7540 }
7541 }
7542 if !pp.runnext.cas(next, 0) {
7543 continue
7544 }
7545 batch[batchHead%uint32(len(batch))] = next
7546 return 1
7547 }
7548 }
7549 return 0
7550 }
7551 if n > uint32(len(pp.runq)/2) {
7552 continue
7553 }
7554 for i := uint32(0); i < n; i++ {
7555 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7556 batch[(batchHead+i)%uint32(len(batch))] = g
7557 }
7558 if atomic.CasRel(&pp.runqhead, h, h+n) {
7559 return n
7560 }
7561 }
7562 }
7563
7564 // runqsteal steals half of the elements from p2's local runnable queue
7565 // and puts them onto pp's local queue. It returns one of the stolen
7566 // elements, or nil if the steal failed.
7567 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7568 t := pp.runqtail
7569 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7570 if n == 0 {
7571 return nil
7572 }
7573 n--
7574 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7575 if n == 0 {
7576 return gp
7577 }
7578 h := atomic.LoadAcq(&pp.runqhead)
7579 if t-h+n >= uint32(len(pp.runq)) {
7580 throw("runqsteal: runq overflow")
7581 }
7582 atomic.StoreRel(&pp.runqtail, t+n)
7583 return gp
7584 }
7585
7586 // A gQueue is a FIFO of Gs linked through g.schedlink. A G can be on
7587 // only one gQueue or gList at a time.
7588 type gQueue struct {
7589 head guintptr
7590 tail guintptr
7591 size int32
7592 }
7593
7594 // empty reports whether q is empty.
7595 func (q *gQueue) empty() bool {
7596 return q.head == 0
7597 }
7598
7599 // push adds gp to the head of q.
7600 func (q *gQueue) push(gp *g) {
7601 gp.schedlink = q.head
7602 q.head.set(gp)
7603 if q.tail == 0 {
7604 q.tail.set(gp)
7605 }
7606 q.size++
7607 }
7608
7609 // pushBack adds gp to the tail of q.
7610 func (q *gQueue) pushBack(gp *g) {
7611 gp.schedlink = 0
7612 if q.tail != 0 {
7613 q.tail.ptr().schedlink.set(gp)
7614 } else {
7615 q.head.set(gp)
7616 }
7617 q.tail.set(gp)
7618 q.size++
7619 }
7620
7621 // pushBackAll adds all Gs in q2 to the tail of q.
7622 // After this q2 must not be used.
7623 func (q *gQueue) pushBackAll(q2 gQueue) {
7624 if q2.tail == 0 {
7625 return
7626 }
7627 q2.tail.ptr().schedlink = 0
7628 if q.tail != 0 {
7629 q.tail.ptr().schedlink = q2.head
7630 } else {
7631 q.head = q2.head
7632 }
7633 q.tail = q2.tail
7634 q.size += q2.size
7635 }
7636
7637
7638 // pop removes and returns the head of q, or nil if q is empty.
7639 func (q *gQueue) pop() *g {
7640 gp := q.head.ptr()
7641 if gp != nil {
7642 q.head = gp.schedlink
7643 if q.head == 0 {
7644 q.tail = 0
7645 }
7646 q.size--
7647 }
7648 return gp
7649 }
7650
7651 // popList takes all Gs in q and returns them as a gList, leaving q empty.
7652 func (q *gQueue) popList() gList {
7653 stack := gList{q.head, q.size}
7654 *q = gQueue{}
7655 return stack
7656 }
7657
7658 // A gList is a list of Gs linked through g.schedlink. A G can be on
7659 // only one gQueue or gList at a time.
7660 type gList struct {
7661 head guintptr
7662 size int32
7663 }
7664
7665 // empty reports whether l is empty.
7666 func (l *gList) empty() bool {
7667 return l.head == 0
7668 }
7669
7670 // push adds gp to the head of l.
7671 func (l *gList) push(gp *g) {
7672 gp.schedlink = l.head
7673 l.head.set(gp)
7674 l.size++
7675 }
7676
7677 // pushAll prepends all Gs in q to l.
7678 func (l *gList) pushAll(q gQueue) {
7679 if !q.empty() {
7680 q.tail.ptr().schedlink = l.head
7681 l.head = q.head
7682 l.size += q.size
7683 }
7684 }
7685
7686 // pop removes and returns the head of l, or nil if l is empty.
7687 func (l *gList) pop() *g {
7688 gp := l.head.ptr()
7689 if gp != nil {
7690 l.head = gp.schedlink
7691 l.size--
7692 }
7693 return gp
7694 }
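// Illustrative only: gQueue and gList are intrusive containers threaded
// through g.schedlink, so they need no separate storage and each operation
// is O(1). A hypothetical use (gp1 and gp2 stand in for real *g values):
//
//	var q gQueue
//	q.pushBack(gp1) // q: gp1
//	q.pushBack(gp2) // q: gp1, gp2
//	var l gList
//	l.pushAll(q)    // l: gp1, gp2; q must no longer be used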
7695
7696 // setMaxThreads updates sched.maxmcount and returns the previous limit.
7697 func setMaxThreads(in int) (out int) {
7698 lock(&sched.lock)
7699 out = int(sched.maxmcount)
7700 if in > 0x7fffffff {
7701 sched.maxmcount = 0x7fffffff
7702 } else {
7703 sched.maxmcount = int32(in)
7704 }
7705 checkmcount()
7706 unlock(&sched.lock)
7707 return
7708 }
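// Illustrative use, assuming this is the function backing
// runtime/debug.SetMaxThreads (values are arbitrary):
//
//	old := debug.SetMaxThreads(20000) // raise the OS thread limit
//	defer debug.SetMaxThreads(old)    // restore the previous limit
//
// The clamp above keeps the stored limit within int32 range.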
7709
7710
7711
7712
7713
7714
7715
7716
7717
7718
7719
7720 // procPin disables preemption by incrementing m.locks and returns the
7721 // id of the P the current goroutine is running on.
7722 func procPin() int {
7723 gp := getg()
7724 mp := gp.m
7725
7726 mp.locks++
7727 return int(mp.p.ptr().id)
7728 }
7729
7730
7731
7732
7733
7734
7735
7736
7737
7738
7739
7740
7741 // procUnpin undoes procPin, re-enabling preemption.
7742 func procUnpin() {
7743 gp := getg()
7744 gp.m.locks--
7745 }
7746
7747
7748 //go:linkname sync_runtime_procPin sync.runtime_procPin
7749 func sync_runtime_procPin() int {
7750 return procPin()
7751 }
7752
7753
7754 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
7755 func sync_runtime_procUnpin() {
7756 procUnpin()
7757 }
7758
7759
7760 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
7761 func sync_atomic_runtime_procPin() int {
7762 return procPin()
7763 }
7764
7765
7766 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
7767 func sync_atomic_runtime_procUnpin() {
7768 procUnpin()
7769 }
7770
7771
7772
7773
7774 // internal_sync_runtime_canSpin reports whether spinning is worthwhile right now.
7775 func internal_sync_runtime_canSpin(i int) bool {
7776
7777 // sync.Mutex is cooperative, so be conservative with spinning:
7778 // spin only a few times, and only on a multicore machine when
7779 // GOMAXPROCS > 1, at least one other P is running, and the local
7780 // run queue is empty.
7781 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7782 return false
7783 }
7784 if p := getg().m.p.ptr(); !runqempty(p) {
7785 return false
7786 }
7787 return true
7788 }
7789
7790
7791 // internal_sync_runtime_doSpin busy-waits briefly via procyield.
7792 func internal_sync_runtime_doSpin() {
7793 procyield(active_spin_cnt)
7794 }
7795
7796
7797
7798
7799
7800
7801
7802
7803
7804
7805
7806
7807
7808
7809 // sync_runtime_canSpin forwards to internal_sync_runtime_canSpin.
7810 func sync_runtime_canSpin(i int) bool {
7811 return internal_sync_runtime_canSpin(i)
7812 }
7813
7814
7815
7816
7817
7818
7819
7820
7821
7822
7823
7824
7825 // sync_runtime_doSpin forwards to internal_sync_runtime_doSpin.
7826 func sync_runtime_doSpin() {
7827 internal_sync_runtime_doSpin()
7828 }
7829
7830 var stealOrder randomOrder
7831
7832 // randomOrder/randomEnum are helper types for randomized work stealing.
7833 // They allow enumerating all Ps in different pseudo-random orders
7834 // without repetitions: if inc and count are coprime, the sequence
7835 // (pos + inc) % count visits every value in [0, count) exactly once.
7836 type randomOrder struct {
7837 count uint32
7838 coprimes []uint32
7839 }
7840
7841 type randomEnum struct {
7842 i uint32
7843 count uint32
7844 pos uint32
7845 inc uint32
7846 }
7847
7848 func (ord *randomOrder) reset(count uint32) {
7849 ord.count = count
7850 ord.coprimes = ord.coprimes[:0]
7851 for i := uint32(1); i <= count; i++ {
7852 if gcd(i, count) == 1 {
7853 ord.coprimes = append(ord.coprimes, i)
7854 }
7855 }
7856 }
7857
7858 func (ord *randomOrder) start(i uint32) randomEnum {
7859 return randomEnum{
7860 count: ord.count,
7861 pos: i % ord.count,
7862 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7863 }
7864 }
7865
7866 func (enum *randomEnum) done() bool {
7867 return enum.i == enum.count
7868 }
7869
7870 func (enum *randomEnum) next() {
7871 enum.i++
7872 enum.pos = (enum.pos + enum.inc) % enum.count
7873 }
7874
7875 func (enum *randomEnum) position() uint32 {
7876 return enum.pos
7877 }
7878
7879 func gcd(a, b uint32) uint32 {
7880 for b != 0 {
7881 a, b = b, a%b
7882 }
7883 return a
7884 }
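// Worked example (illustrative only): reset(6) computes the coprimes of 6
// in 1..6, namely {1, 5}. An enumeration started with inc = 5 and pos = 2
// visits positions 2, 1, 0, 5, 4, 3: because gcd(inc, count) == 1, stepping
// by inc modulo count touches every P exactly once before done() is true.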
7885
7886
7887 // An initTask represents the set of initializations that must be done for a package.
7888 type initTask struct {
7889 state uint32
7890 nfns uint32
7891 // followed by nfns PCs, one per init function to run
7892 }
7893
7894
7895 // inittrace stores statistics for init functions; updated by malloc and newproc when active.
7896 var inittrace tracestat
7897
7898 type tracestat struct {
7899 active bool
7900 id uint64
7901 allocs uint64
7902 bytes uint64
7903 }
7904
7905 func doInit(ts []*initTask) {
7906 for _, t := range ts {
7907 doInit1(t)
7908 }
7909 }
7910
7911 func doInit1(t *initTask) {
7912 switch t.state {
7913 case 2:
7914 return
7915 case 1:
7916 throw("recursive call during initialization - linker skew")
7917 default:
7918 t.state = 1
7919
7920 var (
7921 start int64
7922 before tracestat
7923 )
7924
7925 if inittrace.active {
7926 start = nanotime()
7927
7928 before = inittrace
7929 }
7930
7931 if t.nfns == 0 {
7932 // The linker should have pruned inittasks with no functions.
7933 throw("inittask with no functions")
7934 }
7935
7936 firstFunc := add(unsafe.Pointer(t), 8)
7937 for i := uint32(0); i < t.nfns; i++ {
7938 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7939 f := *(*func())(unsafe.Pointer(&p))
7940 f()
7941 }
7942
7943 if inittrace.active {
7944 end := nanotime()
7945
7946 after := inittrace
7947
7948 f := *(*func())(unsafe.Pointer(&firstFunc))
7949 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7950
7951 var sbuf [24]byte
7952 print("init ", pkg, " @")
7953 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7954 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7955 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7956 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7957 print("\n")
7958 }
7959
7960 t.state = 2
7961 }
7962 }
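// Memory layout assumed by doInit1 above (descriptive, derived from the
// code): an initTask is two uint32 fields (state, nfns) occupying 8 bytes,
// followed immediately by nfns pointer-sized PCs. Hence firstFunc is t+8
// and the i'th init function lives at t+8+i*goarch.PtrSize.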
7963