Source file: src/runtime/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/exithook"
	"internal/runtime/strconv"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

// set using cmd/go/internal/modload.ModInfoProg
var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.
//
// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. Threads are unparked in a "spinning"
// state: a spinning thread looks for work (run queues, timers, netpoll, GC
// work) before parking again, and the last thread leaving the spinning state
// must recheck all work sources to avoid missed wakeups. See the "Delicate
// dance" comment in findRunnable for the full protocol.
var (
	m0           m
	g0           g
	mcache0      *mcache
	raceprocctx0 uintptr
	raceFiniLock mutex
)

// This slice records the initializing tasks that need to be
// done to start up the runtime. It is built by the linker.
var runtime_inittasks []*initTask

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset
// The main goroutine.
func main() {
	mp := getg().m

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	mp.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// An upper limit for max stack size. Used to avoid random crashes
	// after calling SetMaxStack and trying to allocate a stack that is too big,
	// since stackalloc works with 32-bit sizes.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the world started.
	// Must be before doInit for tracing init.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks) // Must be before defer.

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()
	defaultGOMAXPROCSUpdateEnable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		// Set the x_crosscall2_ptr C function pointer variable to point to crosscall2.
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the initializing tasks. Depending on build mode this
	// list can arrive a few different ways, but it will always
	// contain the init tasks computed by the linker for all the
	// packages in the program (excluding those added at runtime
	// by package plugin). Run through the modules in dependency
	// order (the order they are initialized by the dynamic
	// loader, i.e. they are added to the moduledata linked list).
	for m := &firstmoduledata; m != nil; m = m.next {
		doInit(m.inittasks)
	}

	// Disable init tracing after main init done to avoid overhead
	// of collecting statistics in malloc and newproc.
	inittrace.active = false

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		if GOARCH == "wasm" {
			// On Wasm, pause makes it return to the host.
			// Unlike cgo callbacks where Ms are created on demand,
			// on Wasm we have only one M. The host will call into an
			// exported function on the same M again, and we resume
			// execution from where we paused.
			pause(sys.GetCallerSP() - 16) // should not return
			panic("unreachable")
		}
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()

	exitHooksRun := false
	if raceenabled {
		runExitHooks(0) // run hooks now, since racefini does not return
		exitHooksRun = true
		racefini()
	}

	// Check for C memory leaks if using ASAN and we've made cgo calls,
	// or if we are running as a library in a C program.
	// We always make one cgo call, above, to notify_runtime_init_done,
	// so we ignore that one.
	// No point in leak checking if no cgo calls, since leak checking
	// just looks for objects allocated using malloc and friends.
	if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		runExitHooks(0) // lsandoleakcheck may not return
		exitHooksRun = true
		lsandoleakcheck()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	if !exitHooksRun {
		runExitHooks(0)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}

	// See the comment on the asan check in main, above.
	if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		lsandoleakcheck()
	}
}

func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}

func runExitHooks(code int) {
	exithook.Run(code)
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
//
//go:nosplit
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// goschedIfBusy yields the processor like gosched, but only does so if
// there are no idle Ps or if we're the only running goroutine that
// would otherwise monopolize the P.
//
//go:nosplit
func goschedIfBusy() {
	gp := getg()
	// Call gosched if gp.preempt is set; we may be in a tight loop that
	// doesn't otherwise yield.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}

// Puts the current goroutine into a waiting state and calls unlockf on the
// system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called,
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.
//
// gopark should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopark
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
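
// A minimal park/ready pairing (sketch; l, waiters, and push/pop are
// illustrative names, not fields or helpers of any real runtime structure):
//
//	// Waiter: publish ourselves, then sleep. goparkunlock releases l
//	// atomically with the transition to _Gwaiting, so a waker holding l
//	// cannot miss us.
//	lock(&l)
//	waiters.push(getg())
//	goparkunlock(&l, waitReasonZero, traceBlockGeneric, 1)
//
//	// Waker: remove a waiter under l, then mark it runnable.
//	lock(&l)
//	gp := waiters.pop()
//	unlock(&l)
//	goready(gp, 1)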

// goready should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goready
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
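
// The sudog cache is deliberately two-level: the per-P slice serves lock-free
// fast paths, while sched.sudogcache (guarded by sched.sudoglock) rebalances
// between Ps. acquireSudog refills the local cache to half capacity and
// releaseSudog spills half when full, so a P oscillating between acquire and
// release touches the central lock only once per cap/2 operations.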

// called from assembly.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

// gcrash is a fake g that can be used when crashing due to bad
// stack conditions.
var gcrash g

var crashingG atomic.Pointer[g]

// Switch to crashstack and call fn, with special handling of
// concurrent and recursive cases.
//
// Nosplit as it is called in a bad stack condition (we know
// morestack would fail).
//
//go:nosplit
//go:nowritebarrierrec
func switchToCrashStack(fn func()) {
	me := getg()
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// recursive crashing. too bad.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another g is crashing. Give it some time, hopefully it will finish traceback.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}

// Disable crash stack on Windows for now. Apparently, SEH exception
// handling works on the crash stack, but unwinding does not, so the
// crash stack is not usable there yet.
const crashStackImplemented = GOOS != "windows"

//go:noescape
func switchToCrashStack0(fn func()) // in assembly

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}

var (
	// allgs contains all Gs ever created (including dead Gs), and thus
	// never shrinks.
	//
	// Access via the slice is protected by allglock or stop-the-world.
	// Readers that cannot take the lock may (carefully!) use the atomic
	// variables below.
	allglock mutex
	allgs    []*g

	// allglen and allgptr are atomic variables that contain len(allgs) and
	// &allgs[0] respectively. Proper ordering depends on totally-ordered
	// loads and stores. Writes are protected by allglock.
	//
	// allgptr is updated before allglen. Readers should read allglen
	// before allgptr to ensure that allglen is always <= len(allgptr). New
	// Gs appended during the race can be missed. For a consistent view of
	// all Gs, allglock must be held.
	//
	// allgptr copies should always be stored as a concrete type or
	// unsafe.Pointer, not uintptr, to ensure that GC can still reach it
	// even if it points to a stale array.
	allglen uintptr
	allgptr **g
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	if &allgs[0] != allgptr {
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}

// allGsSnapshot returns a snapshot of the slice of all Gs.
//
// The world must be stopped or allglock must be held.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	// Because the world is stopped or allglock is held, allgadd
	// cannot happen concurrently with this. allgs grows
	// monotonically and existing entries never change, so we can
	// simply return a copy of the slice header. For added safety,
	// we trim everything past len because that can still change.
	return allgs[:len(allgs):len(allgs)]
}

// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}

// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}

// forEachG calls fn on every G from allgs.
//
// forEachG takes a lock to exclude concurrent addition of new Gs.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}

// forEachGRace calls fn on every G from allgs.
//
// forEachGRace avoids locking, but does not exclude addition of new Gs during
// execution, which may be missed.
func forEachGRace(fn func(gp *g)) {
	ptr, length := atomicAllG()
	for i := uintptr(0); i < length; i++ {
		gp := atomicAllGIndex(ptr, i)
		fn(gp)
	}
	return
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
	_GoidCacheBatch = 16
)
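
// The pair (allgptr, allglen) forms a publication protocol: allgadd stores
// the new array pointer before the new length, and atomicAllG loads the
// length before the pointer. A racing reader therefore always observes a
// length that is valid for whichever array it sees; at worst it misses Gs
// added after its loads, which forEachGRace explicitly tolerates.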

// cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be the complete
// value of the GODEBUG environment variable.
func cpuinit(env string) {
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		cpu.DebugOptions = true
	}
	cpu.Initialize(env)

	// Support cpu feature variables are used in code generated by the compiler
	// to guard execution of instructions that can not be assumed to be always supported.
	switch GOARCH {
	case "386", "amd64":
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41
		x86HasFMA = cpu.X86.HasFMA

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS

	case "loong64":
		loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
		loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
		loong64HasLSX = cpu.Loong64.HasLSX

	case "riscv64":
		riscv64HasZbb = cpu.RISCV64.HasZbb
	}
}

// getGodebugEarly extracts the environment variable GODEBUG from the
// environment on Unix-like operating systems and returns it. This function
// exists to extract GODEBUG early before much of the runtime is initialized.
// The second return value reports whether the platform supports reading
// GODEBUG this early.
func getGodebugEarly() (string, bool) {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))

			if stringslite.HasPrefix(s, prefix) {
				env = gostringnocopy(p)[len(prefix):]
				break
			}
		}
	default:
		return "", false
	}
	return env, true
}

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()
	// Enforce that this lock is always a leaf lock.
	// All of this lock's critical sections should be
	// extremely short.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	lockVerifyMSize()

	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0))

	// The world starts stopped.
	worldStopped()

	godebug, parsedGodebug := getGodebugEarly()
	if parsedGodebug {
		parseRuntimeDebugVars(godebug)
	}
	ticks.init() // run as early as possible
	moduledataverify()
	stackinit()
	mallocinit()
	cpuinit(godebug) // must run before alginit
	randinit()       // must run before alginit, mcommoninit
	alginit()        // maps, hash, rand must not be used before this call
	mcommoninit(gp.m, -1)
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules
	stkobjinit()    // must run before GC starts

	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	checkfds()
	if !parsedGodebug {
		// Some platforms (e.g. Windows) didn't have the environment
		// available earlier, so parse the GODEBUG variable now.
		parseRuntimeDebugVars(gogetenv("GODEBUG"))
	}
	finishDebugVarsSetup()
	gcinit()

	// Allocate stack space that can be used when crashing due to bad stack
	// conditions, e.g. morestack on g0.
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000

	// if disableMemoryProfiling is set, update MemProfileRate to 0 to turn off memprofile.
	// Note: parsedebugvars may update MemProfileRate, but it will only be
	// <= 0 if memprofilerate was not set by the user, so this only has an
	// effect if the build flag is set.
	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	// mcommoninit runs before parsedebugvars, so init profstacks again.
	mProfStackInit(gp.m)
	defaultGOMAXPROCSInit()

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	var procs int32
	if n, ok := strconv.Atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
		sched.customGOMAXPROCS = true
	} else {
		// Use the startup CPU count for the default GOMAXPROCS. The
		// refined (e.g. cgroup-aware) default is computed lazily; the
		// raw CPU count is a safe starting value until then.
		procs = defaultGOMAXPROCS(numCPUStartup)
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started now, as P's can run.
	worldStarted()

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger. This code just serves
		// to ensure runtime·modinfo is kept in the resulting binary.
		modinfo = ""
	}
}

func dumpgstatus(gp *g) {
	thisg := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
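
// The ordering in schedinit is load-bearing: mallocinit must precede anything
// that allocates, cpuinit must precede alginit so the hash functions can pick
// hardware-accelerated paths, and procresize runs under sched.lock while the
// world is still logically stopped. worldStarted is the commit point after
// which Ps may run.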

// sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude extra M's, which are used for cgocallback from threads
	// created in C.
	//
	// The purpose of the maxmcount limit is to avoid accidental fork
	// bomb from something like millions of goroutines blocking on system
	// calls, causing the runtime to create millions of threads. By
	// definition, this isn't a problem for threads created in C, so we
	// exclude them from the limit.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}

// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for user (and is not necessary unwindable).
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mrandinit(mp)

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() and others iterate over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
	mProfStackInit(mp)
}

// mProfStackInit is used to eagerly initialize stack trace buffers for
// profiling. Lazy allocation would have to deal with reentrancy issues in
// malloc and runtime locks for mLockProfile.
// TODO(mknyszek): Implement lazy allocation if this becomes a problem.
func mProfStackInit(mp *m) {
	if debug.profstackdepth == 0 {
		// debug.profstackdepth is set to 0 by the user, or we're being
		// called from schedinit before parsedebugvars.
		return
	}
	mp.profStack = makeProfStackFP()
	mp.mLockProfile.stack = makeProfStackFP()
}

// makeProfStackFP creates a buffer large enough to hold a maximum-sized stack
// trace as well as any additional frames needed for frame pointer unwinding
// with delayed inline expansion.
func makeProfStackFP() []uintptr {
	// The "1" term is to account for the first stack entry being
	// taken up by a "skip" sentinel value for profilers which
	// defer inline frame expansion until the profile is reported.
	// The "maxSkip" term is for frame pointer unwinding, where we
	// want to end up with debug.profstackdepth frames but will discard
	// some "physical" frames to account for skipping.
	return make([]uintptr, 1+maxSkip+debug.profstackdepth)
}

// makeProfStack returns a buffer large enough to hold a maximum-sized stack
// trace.
func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }

//go:linkname pprof_makeProfStack runtime/pprof.makeProfStack
func pprof_makeProfStack() []uintptr { return makeProfStack() }

func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}

// snapshotAllp returns a snapshot of allp by caching it on the M.
//
// The snapshot exists so that code about to drop its P can still iterate a
// consistent view of allp even though procresize may replace the slice once
// this M stops blocking safe-points. The snapshot is only valid while it is
// cached on the M; callers must clear it with clearAllpSnapshot when done.
func (mp *m) snapshotAllp() []*p {
	mp.allpSnapshot = allp
	return mp.allpSnapshot
}

// clearAllpSnapshot clears the allp snapshot on the M, allowing the backing
// array to be garbage collected if procresize has since replaced allp.
func (mp *m) clearAllpSnapshot() {
	mp.allpSnapshot = nil
}

func (mp *m) hasCgoOnStack() bool {
	return mp.ncgo > 0 || mp.isextra
}

const (
	// osHasLowResTimer indicates that the platform's internal timer system has a low resolution,
	// typically on the order of 1 ms or more.
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is osHasLowResClock expressed as an integer
	// constant, so it can be used in expressions.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock indicates that timestamps produced by nanotime
	// have a low resolution, typically on the order of 1 ms or more.
	osHasLowResClock = osHasLowResClockInt > 0
)

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Mark runnable.
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing atomic.Bool

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// We are crashing, but keep the other goroutines running so
		// the crash output shows them in their current state. Still
		// sleep briefly to let goroutines that were mid-preemption
		// finish parking before we print stacks.
		usleep(1000)
		return
	}

	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, casfrom_Gscanstatus.
//
//go:nosplit
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanpreempted:
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	releaseLockRankAndM(lockRankGscan)
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRankAndM(lockRankGscan)
			}
			return r
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}
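
// The _Gscan bit acts as a per-G spin lock: castogscanstatus is the acquire
// (CAS from status to status|_Gscan) and casfrom_Gscanstatus is the release
// (CAS back). While the bit is set, casgstatus on other threads spins instead
// of transitioning the G, which is what lets the GC scan a goroutine's stack
// while that goroutine is held at a safe point.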

// casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
// various latencies on every transition instead of sampling them.
var casgstatusAlwaysTrack = false

// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			// Call on the systemstack to prevent print and throw from counting
			// against the nosplit stack reservation.
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	lockWithRankMayAcquire(nil, lockRankGscan)

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			systemstack(func() {
				// Call on the systemstack to prevent throw from counting
				// against the nosplit stack reservation.
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	if gp.bubble != nil {
		systemstack(func() {
			gp.bubble.changegstatus(gp, oldval, newval)
		})
	}

	if oldval == _Grunning {
		// Track every gTrackingPeriod time a goroutine transitions out of running.
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Handle various kinds of tracking.
	//
	// Currently:
	// - Time spent in runnable.
	// - Time spent blocked on a sync.Mutex or sync.RWMutex.
	switch oldval {
	case _Grunnable:
		// We transitioned out of runnable, so measure how much
		// time we spent in this state and add it to
		// runnableTime.
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock, measure it. Note that because we're
		// sampling, we have to multiply by our sampling period to get
		// an estimate of the absolute value.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock. Write down the timestamp.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// We just transitioned into runnable, so record what
		// time that happened.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// We're transitioning into running, so turn off
		// tracking and record how much time we spent in
		// runnable.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}

// casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	// Set the wait reason before calling casgstatus, because casgstatus will use it.
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}

// casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the wait reason.
// The wait reason must be a valid isWaitingForSuspendG wait reason.
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForSuspendG() {
		throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
	}
	casGToWaiting(gp, old, reason)
}

// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
//
// TODO(austin): This is the only status operation that both changes
// the status and locks the _Gscan bit. Rethink this.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
}

// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting. If successful, the caller is responsible for
// re-scheduling gp.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	gp.waitreason = waitReasonPreempted
	if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
		return false
	}
	if bubble := gp.bubble; bubble != nil {
		bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
	}
	return true
}

// stwReason is an enumeration of reasons the world is stopping.
type stwReason uint8

// Reasons to stop-the-world.
//
// Avoid reusing reasons and add new ones instead.
const (
	stwUnknown stwReason = iota
	stwGCMarkTerm
	stwGCSweepTerm
	stwWriteHeapDump
	stwGoroutineProfile
	stwGoroutineProfileCleanup
	stwAllGoroutinesStack
	stwReadMemStats
	stwAllThreadsSyscall
	stwGOMAXPROCS
	stwStartTrace
	stwStopTrace
	stwForTestCountPagesInUse
	stwForTestReadMetricsSlow
	stwForTestReadMemStatsSlow
	stwForTestPageCachePagesLeaked
	stwForTestResetDebugLog
)

func (r stwReason) String() string {
	return stwReasonStrings[r]
}

func (r stwReason) isGC() bool {
	return r == stwGCMarkTerm || r == stwGCSweepTerm
}

// If you add to this list, also add it to src/internal/trace/parser.go.
// If you change the values of any of the stw* constants, bump the trace
// version number and make a copy of this.
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}

// worldStop provides context from the stop-the-world required by the
// start-the-world.
type worldStop struct {
	reason           stwReason
	startedStopping  int64
	finishedStopping int64
	stoppingCPUTime  int64
}

// Temporary variable for stopTheWorld, when it can't write to the stack.
//
// Protected by worldsema.
var stopTheWorldContext worldStop

// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points and records reason as the reason for the
// stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller must
// not hold worldsema. The caller must call startTheWorld when other P's
// should resume executing.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
//
// Returns the STW context. When starting the world, this context must be
// passed to startTheWorld.
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write to stack
	})
	return stopTheWorldContext
}

// startTheWorld undoes the effects of stopTheWorld.
//
// w must be the worldStop returned by stopTheWorld.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	//
	// Release worldsema with direct handoff to the next waiter, but
	// acquirem so that semrelease1 doesn't try to yield our time.
	//
	// Otherwise if e.g. ReadMemStats is being called in a loop,
	// it might stomp on other attempts to stop the world, such as
	// for starting or ending GC. The operation this blocks is
	// so heavy-weight that we should just try to be as fair as
	// possible here.
	//
	// We don't want to just allow us to get preempted between now
	// and releasing the semaphore because then we keep everyone
	// (including, for example, GCs) waiting longer.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}

// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running. It also blocks a GC from starting
// until startTheWorldGC is called.
func stopTheWorldGC(reason stwReason) worldStop {
	semacquire(&gcsema)
	return stopTheWorld(reason)
}

// startTheWorldGC undoes the effects of stopTheWorldGC.
//
// w must be the worldStop returned by stopTheWorld.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	semrelease(&gcsema)
}

// Holding worldsema grants an M the right to try to stop the world.
var worldsema uint32 = 1

// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
//
// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
// being changed/enabled during a GC, remove this.
var gcsema uint32 = 1
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should stopTheWorldWithSema on the system
// stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	var stw worldStop
//	systemstack(func() {
//		stw = stopTheWorldWithSema(reason)
//	})
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(func() {
//		now = startTheWorldWithSema(stw)
//	})
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
//
// Returns the STW context. When starting the world, this context must be
// passed to startTheWorldWithSema.
func stopTheWorldWithSema(reason stwReason) worldStop {
	// Mark the goroutine which called stopTheWorld as executing a
	// suspend-G operation, so that concurrent G status readers treat
	// the wait reason specially.
	casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)

	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime() // exclude time waiting for sched.lock from start and total time metrics.
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()
	// stop current P
	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--
	// try to retake all P's in Psyscall status
	trace = traceAcquire()
	for _, pp := range allp {
		s := pp.status
		if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
			if trace.ok() {
				trace.ProcSteal(pp, false)
			}
			pp.syscalltick++
			pp.gcStopTime = nanotime()
			sched.stopwait--
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// stop idle P's
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Double-check we actually stopped everything, and all the invariants hold.
	// Also accumulate all the time spent by each P in _Pgcstop up to the point
	// we've reached here, as we'll be spending the rest of the time in
	// _Pgcstop without a P.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// Some other thread is panicking. This can cause the
		// sanity checks above to fail if the panic happens in
		// the signal handler on a stopped thread. Either way,
		// we should halt this thread.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	// Switch back to _Grunning, now that the world is stopped.
	casgstatus(getg().m.curg, _Gwaiting, _Grunning)

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}

// reason is the same STW reason passed to stopTheWorld. start is the start
// time returned by stopTheWorld.
//
// now is the current time; prefer to pass 0 to capture a fresh timestamp.
//
// Returns now, or the current time if now was 0.
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		list, delta := netpoll(0) // non-blocking
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P.  Do not start another M below.
			newm(nil, p, -1)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	wakep()

	releasem(mp)

	return now
}

// usesLibcall indicates whether this runtime performs system calls
// via libcall.
func usesLibcall() bool {
	switch GOOS {
	case "aix", "darwin", "illumos", "ios", "solaris", "windows":
		return true
	case "openbsd":
		return GOARCH != "mips64"
	}
	return false
}

// mStackIsSystemAllocated indicates whether this runtime starts on a
// system-allocated stack.
func mStackIsSystemAllocated() bool {
	switch GOOS {
	case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
		return true
	case "openbsd":
		return GOARCH != "mips64"
	}
	return false
}

// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
func mstart()

// mstart0 is the Go entry-point for new Ms.
// It may not split the stack because we may not even have stack
// bounds set up yet.
//
// May run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		//
		// Note: these bounds may not be very accurate.
		// We set hi to &size, but there are things above
		// it. The 1024 is supposed to compensate this,
		// but is somewhat arbitrary, not based on anything.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		gp.stack.lo = gp.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	gp.stackguard0 = gp.stack.lo + stackGuard
	// This is the g0, so we can also call go:systemstack
	// functions, which check stackguard1.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
		// the stack, but put it in gp.stack before mstart,
		// so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}

// The go:noinline is to guarantee the sys.GetCallerPC/sys.GetCallerSP below are safe,
// so that we can set up g0.sched to return to the call of mstart1 above.
//
//go:noinline
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Set up m.g0.sched as a label returning to just
	// after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
	// We're never coming back to mstart1 after we call schedule,
	// so other calls can reuse the current frame.
	// And goexit0 does a gogo that needs to return from mstart1
	// and let mstart0 exit the thread.
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = sys.GetCallerPC()
	gp.sched.sp = sys.GetCallerSP()

	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if gp.m == &m0 {
		mstartm0()
	}

	if debug.dataindependenttiming == 1 {
		sys.EnableDIT()
	}

	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}

// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
//
//go:yeswritebarrierrec
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// An extra M is also needed on Windows for callbacks created by
	// syscall.NewCallback. See issue #6751 for details.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}

// mPark causes a thread to park itself, returning once woken.
//
//go:nosplit
func mPark() {
	gp := getg()
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}

// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates signal handling.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		if valgrindenabled {
			valgrindDeregisterStack(mp.gsignal.valgrindStackID)
			mp.gsignal.valgrindStackID = 0
		}
		// On some platforms, when calling into VDSO (e.g. nanotime)
		// we store our g on the gsignal stack, if there is one.
		// Now the stack is freed, unlink it from the m, so we
		// won't write to it when calling VDSO code.
		mp.gsignal = nil
	}

	// Free vgetrandom state.
	vgetrandomDestroy(mp)

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Events must not be traced after this point.

	// Delay reaping m until it's done with the stack.
	//
	// Put mp on the free list, though it will not be reaped while freeWait
	// is freeMWait. mp is no longer reachable via allm, so even if it is
	// on an OS stack, we must keep a reference to mp alive so that the GC
	// doesn't free mp while we are still using it.
	//
	// Note that the free list must not be linked through alllink because
	// some functions walk allm without locking, so may be using alllink.
	//
	// N.B. It's important that the M appears on the free list simultaneously
	// with it being removed so that the tracer can find it.
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// Make sure pendingPreemptSignals is correct when an M exits.
		// For #41702.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Destroy all allocated resources. After this is called, we may no
	// longer take any locks.
	mdestroy(mp)

	if osStack {
		// No more uses of mp, so it is safe to drop the reference.
		mp.freeWait.Store(freeMRef)

		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&mp.freeWait)
}
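
// An M's stack can outlive its scheduler state, so reaping is split across
// the freeWait values: freeMWait means the M is still running on its g0
// stack, freeMStack means a runtime-allocated stack remains to be freed, and
// freeMRef means only the m struct must stay reachable. allocm acts as the
// reaper, sweeping sched.freem on the next thread creation, which is why
// mexit never frees its own stack.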

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema. fn must not refer to any
// part of the current goroutine's stack, since the GC may move it.
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg

		// Mark the user goroutine as waiting so that it may not be
		// preempted or moved while we are looking at its stack, and
		// so that concurrent G status readers treat the suspend-G
		// wait reason specially.
		//
		// This also requires that fn not refer to any part of this
		// goroutine's stack, since a stack shrink or a GC could move
		// the stack while fn runs.
		casGToWaitingForSuspendG(gp, _Grunning, reason)
		forEachPInternal(fn)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}

// forEachPInternal calls fn(p) for every P p when p reaches a GC safe point.
// It is the internal implementation of forEachP.
//
// The caller must hold worldsema and either must ensure that a GC is not
// running or must be a GC-internal caller (e.g., called during mark
// termination).
//
//go:systemstack
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will run the safe point function when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(pp)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for _, p2 := range allp {
		s := p2.status

		// We need to be fine-grained about tracing here, since handoffp
		// can be called in many places where we hold the trace lock
		// already.
		trace := traceAcquire()
		if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
			if trace.ok() {
				// It's important that we traceRelease before we call handoffp, which may also traceAcquire.
				trace.ProcSteal(p2, false)
				traceRelease(trace)
			}
			p2.syscalltick++
			handoffp(p2)
		} else if trace.ok() {
			traceRelease(trace)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//	    runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows pp.
//
//go:yeswritebarrierrec
func allocm(pp *p, fn func(), id int64) *m {
	allocmLock.rlock()

	// The caller owns pp, but we may borrow (i.e., acquirep) it. We must
	// disable preemption to ensure it is not stolen, which would make the
	// caller lose ownership.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		acquirep(pp) // temporarily borrow p for mallocs in this function
	}

	// Release the free M list. We need to do this somewhere and
	// this may free up a stack we can use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			// Wait for the M to signal that its stack is unused.
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// Drop any remaining trace resources.
			// Ms can continue to emit events all the way until wait != freeMWait,
			// so it's only safe to call traceThreadDestroy at this point.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			// Free the stack if needed. For freeMRef, there is
			// nothing to do except drop freem from the sched.freem
			// list.
			if wait == freeMStack {
				// stackfree must be on the system stack, but allocm is
				// reachable off the system stack transitively from
				// startm.
				systemstack(func() {
					stackfree(freem.g0.stack)
					if valgrindenabled {
						valgrindDeregisterStack(freem.g0.valgrindStackID)
						freem.g0.valgrindStackID = 0
					}
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := &new(mPadded).m
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack.
	// Windows and Plan 9 will layout sched stack on OS stack.
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if pp == gp.m.p.ptr() {
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via Casuintptr) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. If needm finds that it has
// taken the last m off the list, its job is - once it has
// installed its own m so that it can do things like allocate
// memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// It calls dropm to put the m back on the list,
// 1. when the callback is done with the m in non-pthread platforms,
// 2. or when the C thread exiting on pthread platforms.
//
// The signal argument indicates whether we're called from a signal
// handler.
//
//go:nosplit
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can also happen on Windows if a global ctor uses a
		// callback created by syscall.NewCallback. See issue #6751
		// for details.
		//
		// Can not throw, because scheduler is not initialized yet.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save and block signals before getting an M.
	// The signal handler may call needm itself,
	// and we must avoid a deadlock. Also, once g is installed,
	// any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// getExtraM is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp, last := getExtraM()

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = last

	// Store the original signal mask for use by minit.
	mp.sigmask = sigmask

	// Install TLS on some platforms (previously setg
	// would do this if necessary).
	osSetupTLS(mp)

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack.
	setg(mp.g0)
	sp := sys.GetCallerSP()
	callbackUpdateSystemStack(mp, sp, signal)

	// Should mark we are already in Go now.
	// Otherwise, we may call needm again when we get a signal, before cgocallbackg1,
	// which means the extram list may be empty, that will cause a deadlock.
	mp.isExtraInC = false

	// Initialize this thread to use the m.
	asminit()
	minit()

	// Emit a trace event for this dead -> syscall transition,
	// but only if we're not in a signal handler.
	//
	// N.B. the tracer can run on a bare M just fine, we just have
	// to make sure to do this before setg(nil) and unminit.
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	sched.ngsys.Add(-1)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	mp.isExtraInSig = signal
}
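
// needm and dropm bracket every cgo callback on a non-Go thread: needm steals
// an M (with its pre-made g0/curg pair) from the extra list using only
// nosplit code, and dropm returns it. On pthread platforms the dropm is
// deferred to a thread-exit destructor via cgoBindM, so repeated callbacks on
// the same C thread reuse one M instead of paying the list dance per call.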

// Acquire an extra m and bind it to the C thread when a pthread key has been created.
//
//go:nosplit
func needAndBindM() {
	needm(false)

	if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
		cgoBindM()
	}
}

// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := extraMWaiters.Swap(0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else if extraMLength.Load() == 0 {
		// Make sure there is at least one extra M.
		oneNewExtraM()
	}
}

// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as _Gidle. Change to _Gdead before
	// adding to allg where GC can see it. We use _Gdead to hide
	// this from tracebacks and stack scans since it isn't a
	// "real" goroutine until needm grabs it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.isextra = true
	// mark we are in C by default.
	mp.isExtraInC = true
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	sched.ngsys.Add(1)

	// Add m to the extra list.
	addExtraM(mp)
}

// dropm puts the current m back onto the extra list.
//
// 1. On systems without pthreads, like Windows,
// dropm is called when a cgo callback has completed.
// In this case, the m will be put back onto the extra list each time.
//
// 2. On systems with pthreads,
// dropm is called while a non-Go thread is exiting.
// We allocate a pthread per-thread variable using pthread_key_create,
// to register a thread-exit-time destructor.
// And store the g into a thread-specific value associated with the pthread key,
// when first return back to C.
// So that the destructor would invoke dropm while the C thread is exiting.
// This is much faster since it avoids expensive signal-related syscalls
// on every callback.
//
// NOTE: this always runs without a P, so, nowritebarrierrec required.
//
//go:nowritebarrierrec
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Emit a trace event for this syscall -> dead transition.
	//
	// N.B. the tracer can run on a bare M just fine, we just have
	// to make sure to do this before setg(nil) and unminit.
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// Return mp.curg to dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// Trash syscalltick so that it doesn't line up with mp.old.syscalltick anymore.
	//
	// In the new tracer, we model needm and dropm as a goroutine being created and
	// destroyed respectively. The m then might get reused with a different procid but
	// still with a reference to oldp, and still with the same syscalltick. The next
	// time a G is "created" in needm, it'll return and quietly reacquire its P from a
	// different m with a different procid, which will confuse the trace parser. By
	// trashing syscalltick, we ensure that it'll appear as if we lost the P to a
	// syscall and everything is consistent.
	mp.syscalltick--

	// Reset trace state unconditionally. This goroutine is being 'destroyed'
	// from the perspective of the tracer.
	mp.curg.trace.reset()

	// Flush all the M's buffers. This is necessary because the M might
	// be used on a different thread with a different procid, so we have
	// to make sure we don't write into the same buffer.
	if traceEnabled() || traceShuttingDown() {
		// Acquire sched.lock across thread destruction. One of the invariants of the tracer
		// is that a thread cannot disappear from the tracer's view (allm or freem) without
		// freezing the world, and freezing the world requires sched.lock.
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()

	setg(nil)

	// Clear g0 stack bounds to ensure that needm always refreshes the
	// bounds when reusing this M.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0
	mp.g0StackAccurate = false

	putExtraM(mp)

	msigrestore(sigmask)
}

// bindm stores the g0 of the current m into a thread-specific value.
//
// We allocate a pthread per-thread variable using pthread_key_create,
// to register a thread-exit-time destructor.
// We are here setting the thread-specific value of the pthread key, to enable the destructor.
// So that the pthread_key_destructor would dropm while the C thread is exiting.
//
// And the saved g will be used in pthread_key_destructor,
// since the g stored in the TLS by Go might be cleared in some platforms,
// before the destructor is invoked, so, we restore g by the stored g, before dropm.
//
// We store g0 instead of m, to make the assembly code simpler,
// since we need to restore g0 in runtime.cgocallback.
//
// On systems without pthreads, like Windows, bindm shouldn't be used.
//
// NOTE: this always runs without a P, so, nowritebarrierrec required.
//
//go:nosplit
//go:nowritebarrierrec
func cgoBindM() {
	if GOOS == "windows" || GOOS == "plan9" {
		fatal("bindm in unexpected GOOS")
	}
	g := getg()
	if g.m.g0 != g {
		fatal("the current g is not g0")
	}
	if _cgo_bindm != nil {
		asmcgocall(_cgo_bindm, unsafe.Pointer(g))
	}
}

// A helper function for EnsureDropM.
//
// getm should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - fortio.org/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname getm
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}

var (
	// Locking linked list of extra M's, via mp.schedlink. Must be accessed
	// only via lockextra/unlockextra.
	//
	// Can't be atomic.Pointer[m] because we use an invalid pointer as a
	// "locked" sentinel value. M's on this list remain visible to the GC
	// because their mp.curg is on allgs.
	extraM atomic.Uintptr
	// Number of M's in the extraM list.
	extraMLength atomic.Uint32
	// Number of waiters in lockextra.
	extraMWaiters atomic.Uint32

	// Number of extra M's in use by threads.
	extraMInUse atomic.Uint32
)

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extraM. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}

// Return an M from the extra M list. Returns last == true if the list becomes
// empty because of this call.
//
// Spins waiting for an extra M, so caller must ensure that the list always
// contains or will soon contain at least one M.
//
//go:nosplit
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}

// Returns an extra M back to the list. The extra M is expected to be in use.
//
//go:nosplit
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}

// Adds a newly allocated M to the extra M list.
//
//go:nosplit
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
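
// The extra M list is a Treiber-style stack whose head doubles as its lock:
// extraM holds either a real *m, nil, or the sentinel value 1 ("locked").
// lockextra swaps in the sentinel to take ownership and unlockextra publishes
// the new head, so pushes and pops never race, even on threads that have no
// m or g and therefore cannot take an ordinary runtime mutex.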

var (
	// allocmLock is locked while allocating a new M.
	//
	// allocm takes it for reading; write-locking it excludes the creation
	// of new Ms so that callers (e.g., doAllThreadsSyscall) can observe a
	// stable set of threads.
	allocmLock rwmutex

	// execLock serializes exec and clone to avoid bugs or unspecified
	// behaviour around exec'ing while creating/destroying threads. See
	// issue #19546.
	execLock rwmutex
)

// Error messages used by the low-level thread-creation paths, which cannot
// allocate.
const (
	failthreadcreate  = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)

// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
// start an OS thread.
var newmHandoff struct {
	lock mutex

	// newm points to a list of M structures that need new OS threads.
	// The list is linked through m.schedlink.
	newm muintptr

	// waiting indicates that wake needs to be notified when an m
	// is put on the list.
	waiting bool
	wake    note

	// haveTemplateThread indicates that the templateThread has
	// been started. This is not protected by lock. Use cas to set
	// to 1.
	haveTemplateThread uint32
}

// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
//
//go:nowritebarrierrec
func newm(fn func(), pp *p, id int64) {
	// allocm adds a new M to allm, but they do not start until created by
	// the OS in newm1 or the template thread.
	//
	// doAllThreadsSyscall requires that every M in allm will eventually
	// start and be signal-able, even with a STW.
	//
	// Disable preemption here until we start the thread to ensure that
	// newm is not preempted between allocm and starting the new thread,
	// ensuring that anything added to allm is guaranteed to eventually
	// start.
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We're on a locked M or a thread that may have been
		// started by C. The kernel state of this thread may
		// be strange (the user may have locked it for that
		// purpose). We don't want to clone that into another
		// thread. Instead, ask a known-good thread to create
		// the thread for us.
		//
		// This is disabled on Plan 9. See golang.org/issue/22227.
		//
		// TODO: This may be unnecessary on Windows, which
		// doesn't model thread creation off fork.
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		// The M has not started yet, but the template thread does not
		// participate in STW, so it will always process queued Ms and
		// it is safe to releasem.
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}

func newm1(mp *m) {
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock() // Prevent process clone.
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock() // Prevent process clone.
	newosproc(mp)
	execLock.runlock()
}

// startTemplateThread starts the template thread if it is not already
// running.
//
// The calling thread must itself be in a known-good state.
func startTemplateThread() {
	if GOARCH == "wasm" { // no threads on wasm yet
		return
	}

	// Disable preemption to guarantee that the template thread will be
	// created before a park once haveTemplateThread is set.
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}

// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
//
// Many programs never need this, so templateThread is started lazily
// when we first enter a state that might lead to running on a thread
// in an unknown state.
//
// templateThread runs on an M without a P, so it must not have write
// barriers.
//
//go:nowritebarrierrec
func templateThread() {
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}

func mspinning() {
	// startm's caller incremented nmspinning. Set the new M's spinning.
	getg().m.spinning = true
}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide a
// P. startm will set m.spinning in the newly started M.
//
// Callers passing a non-nil P must call from a non-preemptible context. See
// comment on acquirem below.
//
// Argument lockheld indicates whether the caller already acquired the
// scheduler lock. Callers holding the lock when making the call must pass
// true. The lock might be temporarily dropped, but will be reacquired before
// returning.
//
// Must not have write barriers because this may be called without a P.
//
//go:nowritebarrierrec
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption.
	//
	// Every owned P must have an owner that will eventually stop it in the
	// event of a GC stop request. startm takes transient ownership of a P
	// (either from argument or pidleget below) and transfers ownership to
	// a started M, which will be responsible for performing the stop.
	//
	// Preemption must be disabled during this transient ownership,
	// otherwise the P this is running on may enter GC stop while still
	// holding the transient P, leaving that P in limbo and deadlocking the
	// STW.
	//
	// Callers passing a non-nil P must already be in non-preemptible
	// context, otherwise such preemption could occur on function entry to
	// startm. Callers passing a nil P may be preemptible, so we have to
	// disable preemption before acquiring a P from pidleget below.
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// TODO(prattmic): All remaining calls to this function
			// with _p_ == nil could be cleaned up to find a P
			// before calling startm.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No M is available, we must drop sched.lock and call newm.
		// However, we already own a P to assign to the M.
		//
		// Once sched.lock is released, another G (e.g., in a syscall),
		// could find no idle P while checkdead finds a runnable G but
		// no running M's because this new M hasn't started yet, thus
		// throwing in an apparent deadlock.
		//
		// Avoid this situation by pre-allocating the ID for the new M,
		// thus marking it as 'running' before we drop sched.lock. This
		// new M will eventually run the scheduler to execute any
		// queued G's.
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning in the new M.
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock)
		}
		// Ownership transfer of pp committed by start in newm.
		// Preemption is now safe.
		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning, so set m.spinning in the new M.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)
	// Ownership transfer of pp committed by wakeup. Preemption is now
	// safe.
	releasem(mp)
}

// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
func handoffp(pp *p) {
	// handoffp must start an M in any situation where
	// findrunnable would return a G to run on pp.

	// if it has local work, start it straight away
	if !runqempty(pp) || !sched.runq.empty() {
		startm(pp, false, false)
		return
	}
	// if there's trace work to do, start it straight away
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// if it has GC work, start it straight away
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
		startm(pp, false, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) { // TODO: fast atomic
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if !sched.runq.empty() {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// The scheduler lock cannot be held when calling wakeNetPoller below
	// because wakeNetPoller may call wakep which may call startm.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
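
// handoffp and findRunnable form a matched pair: every source of work that
// findRunnable checks (local runq, global runq, trace reader, GC mark work,
// netpoll, timers) has a corresponding "start an M" case above. That
// agreement is what keeps a P handed off during a syscall from stranding
// runnable work with no thread to execute it.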

// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
// Must be called with a P.
func wakep() {
	// Be conservative about spinning threads, only start one if none exist
	// already.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until ownership of pp transfers to the next M in
	// startm. Otherwise preemption here would leave pp stuck waiting to
	// enter _Pgcstop.
	//
	// An alternative would be to transfer ownership of pp to the signal
	// handler, but there are complications there.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}
	// Since we always have a P, the race in the "No M is available"
	// comment in startm doesn't apply during the small window between the
	// unlock here and lock in startm. A checkdead in between will always
	// see at least one running M (ours).
	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
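
// wakep is deliberately racy: the nmspinning CAS admits at most one new
// spinning M per wakeup, and a failed CAS simply means some other M is
// already spinning and is responsible for finding the work. A wakeup lost to
// this race is recovered by the needspinning handshake in findRunnable; the
// cost of a spurious wakeup is only a brief spin before the M parks again.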

// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	gp := getg()

	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Schedule another M to run this p.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	mPark()
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}

// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// directly handoff current P to the locked m
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	notewakeup(&mp.park)
	stopm()
}

// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		gp.m.spinning = false
		// OK to just drop nmspinning here,
		// startTheWorld will unpark threads as necessary.
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}

// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.
//
//go:yeswritebarrierrec
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure that gp has had its stack written out to the goroutine
		// profile, exactly as it was when the goroutine profiler first
		// stopped the world.
		tryRecordGoroutineProfile(gp, nil, osyield)
	}

	// Assign gp.m before entering _Grunning so running Gs have an M.
	mp.curg = gp
	gp.m = mp
	gp.syncSafePoint = false // Clear the flag, which may have been set by morestack.
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		mp.p.ptr().schedtick++
	}

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	gogo(&gp.sched)
}

// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

	// The conditions here and in handoffp must agree: if
	// findrunnable would return a G to run, handoffp must start
	// an M.

top:
	// We may have collected an allp snapshot below. The snapshot is only
	// required in each loop iteration. Clear it to allow GC to collect the
	// slice.
	mp.clearAllpSnapshot()

	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// now and pollUntil are saved for work stealing later,
	// which may steal timers. It's important that between now
	// and then, nothing blocks, so these numbers remain mostly
	// relevant.
	now, pollUntil, _ := pp.timers.check(0, nil)

	// Try to schedule the trace reader.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// Try to schedule a GC worker.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Check the global runnable queue once in a while to ensure fairness.
	// Otherwise two goroutines can completely occupy the local runqueue
	// by constantly respawning each other.
	if pp.schedtick%61 == 0 && !sched.runq.empty() {
		lock(&sched.lock)
		gp := globrunqget()
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake up the finalizer G.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}

	// Wake up one or more cleanup Gs.
	if gcCleanups.needsWake() {
		gcCleanups.wake()
	}

	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// local runq
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// global runq
	if !sched.runq.empty() {
		lock(&sched.lock)
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp != nil {
			if runqputbatch(pp, &q); !q.empty() {
				throw("Couldn't put Gs into empty local runq")
			}
			return gp, false, false
		}
	}

	// Poll network.
	// This netpoll is only an optimization before we resort to stealing.
	// We can safely skip it if there are no waiters or a thread is blocked
	// in netpoll already. If there is any kind of logical race with that
	// blocked thread (e.g. it has already returned all waiters have been
	// woken up), we will claim goroutines during the next netpoll iteration.
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
		list, delta := netpoll(0) // non-blocking
		sched.pollingNet.Store(0)
		if !list.empty() {
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Spinning Ms: steal work from other Ps.
	//
	// Limit the number of spinning Ms to half the number of busy Ps.
	// This is necessary to prevent excessive CPU consumption when
	// GOMAXPROCS>>1 but the program parallelism is low.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			// Successfully stole.
			return gp, inheritTime, false
		}
		if newWork {
			// There may be new timer or GC work; restart to
			// discover.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer to wait for.
			pollUntil = w
		}
	}

	// We have nothing to do.
	//
	// If we're in the GC mark phase, can safely scan and blacken objects,
	// and have work to do, run idle-time marking rather than give up the P.
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// wasm only:
	// If a callback returned and no other goroutine is awake,
	// then wake event handler goroutine which pauses execution
	// until a callback was triggered.
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// Before we drop our P, make a snapshot of the allp slice,
	// which can change underfoot once we no longer block
	// safe-points. We don't need to snapshot the contents because
	// everything up to cap(allp) is immutable.
	allpSnapshot := mp.snapshotAllp()
	// Also snapshot masks. Value changes are OK, but we can't allow
	// len to change out from under us.
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// return P and block
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if !sched.runq.empty() {
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp == nil {
			throw("global runq empty with non-zero runqsize")
		}
		if runqputbatch(pp, &q); !q.empty() {
			throw("Couldn't put Gs into empty local runq")
		}
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// See "Delicate dance" comment below.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findrunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638 // We're about to drop out of spinning mode. The transition from spinning
3639 // to non-spinning is delicate: the thread decrements nmspinning first and
3640 // only then re-checks all the sources of work, because anyone submitting
3641 // work reads nmspinning *after* pushing the work. If we re-checked before
3642 // decrementing, a submitter could observe a spinning thread, skip waking
3643 // anyone, and strand the work. Re-checking the global queue, per-P run
3644 // queues, idle-priority GC work, and timers after the decrement closes
3645 // that race, which is exactly what the code below does.
3646 wasSpinning := mp.spinning
3647 if mp.spinning {
3648 mp.spinning = false
3649 if sched.nmspinning.Add(-1) < 0 {
3650 throw("findrunnable: negative nmspinning")
3651 }
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664 lock(&sched.lock)
3665 if !sched.runq.empty() {
3666 pp, _ := pidlegetSpinning(0)
3667 if pp != nil {
3668 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3669 unlock(&sched.lock)
3670 if gp == nil {
3671 throw("global runq empty with non-zero runqsize")
3672 }
3673 if runqputbatch(pp, &q); !q.empty() {
3674 throw("Couldn't put Gs into empty local runq")
3675 }
3676 acquirep(pp)
3677 mp.becomeSpinning()
3678 return gp, false, false
3679 }
3680 }
3681 unlock(&sched.lock)
3682
3683 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3684 if pp != nil {
3685 acquirep(pp)
3686 mp.becomeSpinning()
3687 goto top
3688 }
3689
3690
3691 pp, gp := checkIdleGCNoP()
3692 if pp != nil {
3693 acquirep(pp)
3694 mp.becomeSpinning()
3695
3696
3697 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3698 trace := traceAcquire()
3699 casgstatus(gp, _Gwaiting, _Grunnable)
3700 if trace.ok() {
3701 trace.GoUnpark(gp, 0)
3702 traceRelease(trace)
3703 }
3704 return gp, false, false
3705 }
3706
3707
3708
3709
3710
3711
3712
3713 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3714 }
3715
3716
3717
3718
3719
3720 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3721 sched.pollUntil.Store(pollUntil)
3722 if mp.p != 0 {
3723 throw("findrunnable: netpoll with p")
3724 }
3725 if mp.spinning {
3726 throw("findrunnable: netpoll with spinning")
3727 }
3728 delay := int64(-1)
3729 if pollUntil != 0 {
3730 if now == 0 {
3731 now = nanotime()
3732 }
3733 delay = pollUntil - now
3734 if delay < 0 {
3735 delay = 0
3736 }
3737 }
3738 if faketime != 0 {
3739
3740 delay = 0
3741 }
3742 list, delta := netpoll(delay)
3743
3744 now = nanotime()
3745 sched.pollUntil.Store(0)
3746 sched.lastpoll.Store(now)
3747 if faketime != 0 && list.empty() {
3748
3749
3750 stopm()
3751 goto top
3752 }
3753 lock(&sched.lock)
3754 pp, _ := pidleget(now)
3755 unlock(&sched.lock)
3756 if pp == nil {
3757 injectglist(&list)
3758 netpollAdjustWaiters(delta)
3759 } else {
3760 acquirep(pp)
3761 if !list.empty() {
3762 gp := list.pop()
3763 injectglist(&list)
3764 netpollAdjustWaiters(delta)
3765 trace := traceAcquire()
3766 casgstatus(gp, _Gwaiting, _Grunnable)
3767 if trace.ok() {
3768 trace.GoUnpark(gp, 0)
3769 traceRelease(trace)
3770 }
3771 return gp, false, false
3772 }
3773 if wasSpinning {
3774 mp.becomeSpinning()
3775 }
3776 goto top
3777 }
3778 } else if pollUntil != 0 && netpollinited() {
3779 pollerPollUntil := sched.pollUntil.Load()
3780 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3781 netpollBreak()
3782 }
3783 }
3784 stopm()
3785 goto top
3786 }
3787
3788 // pollWork reports whether there is non-background work this P could
3789 // be doing. This is a fairly lightweight check to be used for
3790 // background work loops, like idle GC. It checks a subset of the
3791 // conditions checked by the actual scheduler.
3792 func pollWork() bool {
3793 if !sched.runq.empty() {
3794 return true
3795 }
3796 p := getg().m.p.ptr()
3797 if !runqempty(p) {
3798 return true
3799 }
3800 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3801 if list, delta := netpoll(0); !list.empty() {
3802 injectglist(&list)
3803 netpollAdjustWaiters(delta)
3804 return true
3805 }
3806 }
3807 return false
3808 }
3809
3810
3811 // stealWork attempts to steal a runnable goroutine or timer from any P.
3812 //
3813 // If newWork is true, new work may have been readied.
3814 //
3815 // If now is not 0 it is the current time; rnow returns that or an updated time.
3816 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3817 pp := getg().m.p.ptr()
3818
3819 ranTimer := false
3820
3821 const stealTries = 4
3822 for i := 0; i < stealTries; i++ {
3823 stealTimersOrRunNextG := i == stealTries-1
3824
3825 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3826 if sched.gcwaiting.Load() {
3827
3828 return nil, false, now, pollUntil, true
3829 }
3830 p2 := allp[enum.position()]
3831 if pp == p2 {
3832 continue
3833 }
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3849 tnow, w, ran := p2.timers.check(now, nil)
3850 now = tnow
3851 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3852 pollUntil = w
3853 }
3854 if ran {
3855
3856
3857
3858
3859
3860
3861
3862
3863 if gp, inheritTime := runqget(pp); gp != nil {
3864 return gp, inheritTime, now, pollUntil, ranTimer
3865 }
3866 ranTimer = true
3867 }
3868 }
3869
3870
3871 if !idlepMask.read(enum.position()) {
3872 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3873 return gp, false, now, pollUntil, ranTimer
3874 }
3875 }
3876 }
3877 }
3878
3879
3880
3881
3882 return nil, false, now, pollUntil, ranTimer
3883 }
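// To make the stealing above concrete: runqsteal (called for each candidate
// victim) grabs half of the victim P's local run queue in one batch, so a
// victim with 8 queued Gs loses 4 to the thief. On the final attempt the
// loop also tries the victim's runnext slot and runs its timers.
// (Explanatory note; the numbers are illustrative only.)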
3884
3885 // checkRunqsNoP checks all Ps for a runnable G to steal. On entry we
3886 // have no P.
3887 //
3888 // If a G is available anywhere, the function acquires and returns an idle
3889 // P for the caller to spin and steal with; otherwise it returns nil.
3890 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3891 for id, p2 := range allpSnapshot {
3892 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3893 lock(&sched.lock)
3894 pp, _ := pidlegetSpinning(0)
3895 if pp == nil {
3896
3897 unlock(&sched.lock)
3898 return nil
3899 }
3900 unlock(&sched.lock)
3901 return pp
3902 }
3903 }
3904
3905
3906 return nil
3907 }
3908
3909 // checkTimersNoP checks all Ps for a timer expiring sooner than pollUntil.
3910 //
3911 // Returns the updated pollUntil value.
3912 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3913 for id, p2 := range allpSnapshot {
3914 if timerpMaskSnapshot.read(uint32(id)) {
3915 w := p2.timers.wakeTime()
3916 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3917 pollUntil = w
3918 }
3919 }
3920 }
3921
3922 return pollUntil
3923 }
3924
3925 // checkIdleGCNoP checks for idle-priority GC work without a P on entry.
3926 //
3927 // If GC work, an idle P, and a worker G are all available, the P and G are
3928 // returned. The returned P has not been wired yet.
3929 func checkIdleGCNoP() (*p, *g) {
3930
3931
3932
3933
3934
3935
3936 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3937 return nil, nil
3938 }
3939 if !gcMarkWorkAvailable(nil) {
3940 return nil, nil
3941 }
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960 lock(&sched.lock)
3961 pp, now := pidlegetSpinning(0)
3962 if pp == nil {
3963 unlock(&sched.lock)
3964 return nil, nil
3965 }
3966
3967
3968 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3969 pidleput(pp, now)
3970 unlock(&sched.lock)
3971 return nil, nil
3972 }
3973
3974 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3975 if node == nil {
3976 pidleput(pp, now)
3977 unlock(&sched.lock)
3978 gcController.removeIdleMarkWorker()
3979 return nil, nil
3980 }
3981
3982 unlock(&sched.lock)
3983
3984 return pp, node.gp.ptr()
3985 }
3986
3987 // wakeNetPoller wakes up the thread sleeping in the network poller if it
3988 // isn't going to wake up before the when argument, or it wakes an idle P
3989 // to service timers and the network poller if there isn't one already.
3990 func wakeNetPoller(when int64) {
3991 if sched.lastpoll.Load() == 0 {
3992
3993
3994
3995
3996 pollerPollUntil := sched.pollUntil.Load()
3997 if pollerPollUntil == 0 || pollerPollUntil > when {
3998 netpollBreak()
3999 }
4000 } else {
4001
4002
4003 if GOOS != "plan9" {
4004 wakep()
4005 }
4006 }
4007 }
4008
4009 func resetspinning() {
4010 gp := getg()
4011 if !gp.m.spinning {
4012 throw("resetspinning: not a spinning m")
4013 }
4014 gp.m.spinning = false
4015 nmspinning := sched.nmspinning.Add(-1)
4016 if nmspinning < 0 {
4017 throw("findrunnable: negative nmspinning")
4018 }
4019
4020
4021
4022 wakep()
4023 }
4024
4025 // injectglist adds each runnable G on the list to some run queue,
4026 // and clears glist. If there is no current P, they are added to the
4027 // global queue, and up to npidle M's are started to run them.
4028 // Otherwise, for each idle P, this adds a G to the global queue
4029 // and starts an M. Any remaining G's are added to the current P's
4030 // local run queue.
4031 // This may temporarily acquire sched.lock.
4032 // Can run concurrently with GC.
4033 func injectglist(glist *gList) {
4034 if glist.empty() {
4035 return
4036 }
4037
4038
4039
4040 var tail *g
4041 trace := traceAcquire()
4042 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4043 tail = gp
4044 casgstatus(gp, _Gwaiting, _Grunnable)
4045 if trace.ok() {
4046 trace.GoUnpark(gp, 0)
4047 }
4048 }
4049 if trace.ok() {
4050 traceRelease(trace)
4051 }
4052
4053
4054 q := gQueue{glist.head, tail.guintptr(), glist.size}
4055 *glist = gList{}
4056
4057 startIdle := func(n int32) {
4058 for ; n > 0; n-- {
4059 mp := acquirem()
4060 lock(&sched.lock)
4061
4062 pp, _ := pidlegetSpinning(0)
4063 if pp == nil {
4064 unlock(&sched.lock)
4065 releasem(mp)
4066 break
4067 }
4068
4069 startm(pp, false, true)
4070 unlock(&sched.lock)
4071 releasem(mp)
4072 }
4073 }
4074
4075 pp := getg().m.p.ptr()
4076 if pp == nil {
4077 n := q.size
4078 lock(&sched.lock)
4079 globrunqputbatch(&q)
4080 unlock(&sched.lock)
4081 startIdle(n)
4082 return
4083 }
4084
4085 var globq gQueue
4086 npidle := sched.npidle.Load()
4087 for ; npidle > 0 && !q.empty(); npidle-- {
4088 g := q.pop()
4089 globq.pushBack(g)
4090 }
4091 if !globq.empty() {
4092 n := globq.size
4093 lock(&sched.lock)
4094 globrunqputbatch(&globq)
4095 unlock(&sched.lock)
4096 startIdle(n)
4097 }
4098
4099 if runqputbatch(pp, &q); !q.empty() {
4100 lock(&sched.lock)
4101 globrunqputbatch(&q)
4102 unlock(&sched.lock)
4103 }
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118 wakep()
4119 }
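// Net effect of injectglist (summary note): with k idle Ps, the first k Gs
// go to the global queue with one M started per idle P (best effort), the
// remainder land on the current P's local run queue, and anything that does
// not fit overflows back to the global queue.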
4120
4121 // schedule runs one round of the scheduler: it finds a runnable
4122 // goroutine and executes it. It never returns.
4123 func schedule() {
4124 mp := getg().m
4125
4126 if mp.locks != 0 {
4127 throw("schedule: holding locks")
4128 }
4129
4130 if mp.lockedg != 0 {
4131 stoplockedm()
4132 execute(mp.lockedg.ptr(), false)
4133 }
4134
4135
4136
4137 if mp.incgo {
4138 throw("schedule: in cgo")
4139 }
4140
4141 top:
4142 pp := mp.p.ptr()
4143 pp.preempt = false
4144
4145
4146
4147
4148 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4149 throw("schedule: spinning with local work")
4150 }
4151
4152 gp, inheritTime, tryWakeP := findRunnable()
4153
4154
4155
4156
4157 mp.clearAllpSnapshot()
4158
4159 if debug.dontfreezetheworld > 0 && freezing.Load() {
4160
4161
4162
4163
4164
4165
4166
4167 lock(&deadlock)
4168 lock(&deadlock)
4169 }
4170
4171
4172
4173
4174 if mp.spinning {
4175 resetspinning()
4176 }
4177
4178 if sched.disable.user && !schedEnabled(gp) {
4179
4180
4181
4182 lock(&sched.lock)
4183 if schedEnabled(gp) {
4184
4185
4186 unlock(&sched.lock)
4187 } else {
4188 sched.disable.runnable.pushBack(gp)
4189 unlock(&sched.lock)
4190 goto top
4191 }
4192 }
4193
4194
4195
4196 if tryWakeP {
4197 wakep()
4198 }
4199 if gp.lockedm != 0 {
4200
4201
4202 startlockedm(gp)
4203 goto top
4204 }
4205
4206 execute(gp, inheritTime)
4207 }
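// A sketch (simplified; not actual runtime code) of the loop that schedule
// participates in. execute never returns; instead the running G re-enters
// schedule via mcall when it parks, yields, or exits:
//
//	for {
//		gp, inheritTime, _ := findRunnable() // blocks until work is available
//		execute(gp, inheritTime)             // switches to gp; does not return
//	}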
4208
4209 // dropg removes the association between m and the current goroutine m->curg
4210 // (gp for short). Typically a caller sets gp's status away from Grunning and
4211 // then immediately calls dropg to finish the job. The caller is also
4212 // responsible for arranging that gp will be restarted using ready at an
4213 // appropriate time. After calling dropg and arranging for gp to be readied
4214 // later, the caller can do other work but eventually should call schedule to
4215 // restart the scheduling of goroutines on this m.
4216 func dropg() {
4217 gp := getg()
4218
4219 setMNoWB(&gp.m.curg.m, nil)
4220 setGNoWB(&gp.m.curg, nil)
4221 }
4222
4223 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4224 unlock((*mutex)(lock))
4225 return true
4226 }
4227
4228 // park_m is the continuation of gopark on g0.
4229 func park_m(gp *g) {
4230 mp := getg().m
4231
4232 trace := traceAcquire()
4233
4234
4235
4236
4237
4238 bubble := gp.bubble
4239 if bubble != nil {
4240 bubble.incActive()
4241 }
4242
4243 if trace.ok() {
4244
4245
4246
4247 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4248 }
4249
4250
4251 casgstatus(gp, _Grunning, _Gwaiting)
4252 if trace.ok() {
4253 traceRelease(trace)
4254 }
4255
4256 dropg()
4257
4258 if fn := mp.waitunlockf; fn != nil {
4259 ok := fn(gp, mp.waitlock)
4260 mp.waitunlockf = nil
4261 mp.waitlock = nil
4262 if !ok {
4263 trace := traceAcquire()
4264 casgstatus(gp, _Gwaiting, _Grunnable)
4265 if bubble != nil {
4266 bubble.decActive()
4267 }
4268 if trace.ok() {
4269 trace.GoUnpark(gp, 2)
4270 traceRelease(trace)
4271 }
4272 execute(gp, true)
4273 }
4274 }
4275
4276 if bubble != nil {
4277 bubble.decActive()
4278 }
4279
4280 schedule()
4281 }
4282
4283 func goschedImpl(gp *g, preempted bool) {
4284 trace := traceAcquire()
4285 status := readgstatus(gp)
4286 if status&^_Gscan != _Grunning {
4287 dumpgstatus(gp)
4288 throw("bad g status")
4289 }
4290 if trace.ok() {
4291
4292
4293
4294 if preempted {
4295 trace.GoPreempt()
4296 } else {
4297 trace.GoSched()
4298 }
4299 }
4300 casgstatus(gp, _Grunning, _Grunnable)
4301 if trace.ok() {
4302 traceRelease(trace)
4303 }
4304
4305 dropg()
4306 lock(&sched.lock)
4307 globrunqput(gp)
4308 unlock(&sched.lock)
4309
4310 if mainStarted {
4311 wakep()
4312 }
4313
4314 schedule()
4315 }
4316
4317
4318 func gosched_m(gp *g) {
4319 goschedImpl(gp, false)
4320 }
4321
4322
4323 func goschedguarded_m(gp *g) {
4324 if !canPreemptM(gp.m) {
4325 gogo(&gp.sched)
4326 }
4327 goschedImpl(gp, false)
4328 }
4329
4330 func gopreempt_m(gp *g) {
4331 goschedImpl(gp, true)
4332 }
4333
4334
4335
4336
4337 func preemptPark(gp *g) {
4338 status := readgstatus(gp)
4339 if status&^_Gscan != _Grunning {
4340 dumpgstatus(gp)
4341 throw("bad g status")
4342 }
4343
4344 if gp.asyncSafePoint {
4345
4346
4347
4348 f := findfunc(gp.sched.pc)
4349 if !f.valid() {
4350 throw("preempt at unknown pc")
4351 }
4352 if f.flag&abi.FuncFlagSPWrite != 0 {
4353 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4354 throw("preempt SPWRITE")
4355 }
4356 }
4357
4358
4359
4360
4361
4362
4363
4364 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4365 dropg()
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382 trace := traceAcquire()
4383 if trace.ok() {
4384 trace.GoPark(traceBlockPreempted, 0)
4385 }
4386 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4387 if trace.ok() {
4388 traceRelease(trace)
4389 }
4390 schedule()
4391 }
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404 // goyield is like Gosched, but it:
4405 // - emits a GoPreempt trace event instead of a GoSched trace event
4406 // - puts the current G on the runq of the current P instead of the global queue
4407 func goyield() {
4408 checkTimeouts()
4409 mcall(goyield_m)
4410 }
4411
4412 func goyield_m(gp *g) {
4413 trace := traceAcquire()
4414 pp := gp.m.p.ptr()
4415 if trace.ok() {
4416
4417
4418
4419 trace.GoPreempt()
4420 }
4421 casgstatus(gp, _Grunning, _Grunnable)
4422 if trace.ok() {
4423 traceRelease(trace)
4424 }
4425 dropg()
4426 runqput(pp, gp, false)
4427 schedule()
4428 }
4429
4430 // goexit1 finishes execution of the current goroutine.
4431 func goexit1() {
4432 if raceenabled {
4433 if gp := getg(); gp.bubble != nil {
4434 racereleasemergeg(gp, gp.bubble.raceaddr())
4435 }
4436 racegoend()
4437 }
4438 trace := traceAcquire()
4439 if trace.ok() {
4440 trace.GoEnd()
4441 traceRelease(trace)
4442 }
4443 mcall(goexit0)
4444 }
4445
4446 // goexit0 is the goexit continuation on g0.
4447 func goexit0(gp *g) {
4448 gdestroy(gp)
4449 schedule()
4450 }
4451
4452 func gdestroy(gp *g) {
4453 mp := getg().m
4454 pp := mp.p.ptr()
4455
4456 casgstatus(gp, _Grunning, _Gdead)
4457 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4458 if isSystemGoroutine(gp, false) {
4459 sched.ngsys.Add(-1)
4460 }
4461 gp.m = nil
4462 locked := gp.lockedm != 0
4463 gp.lockedm = 0
4464 mp.lockedg = 0
4465 gp.preemptStop = false
4466 gp.paniconfault = false
4467 gp._defer = nil
4468 gp._panic = nil
4469 gp.writebuf = nil
4470 gp.waitreason = waitReasonZero
4471 gp.param = nil
4472 gp.labels = nil
4473 gp.timer = nil
4474 gp.bubble = nil
4475
4476 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4477
4478
4479
4480 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4481 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4482 gcController.bgScanCredit.Add(scanCredit)
4483 gp.gcAssistBytes = 0
4484 }
4485
4486 dropg()
4487
4488 if GOARCH == "wasm" {
4489 gfput(pp, gp)
4490 return
4491 }
4492
4493 if locked && mp.lockedInt != 0 {
4494 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4495 if mp.isextra {
4496 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4497 }
4498 throw("exited a goroutine internally locked to the OS thread")
4499 }
4500 gfput(pp, gp)
4501 if locked {
4502
4503
4504
4505
4506
4507
4508 if GOOS != "plan9" {
4509 gogo(&mp.g0.sched)
4510 } else {
4511
4512
4513 mp.lockedExt = 0
4514 }
4515 }
4516 }
4517
4518 // save updates getg().sched to refer to pc, sp, and bp so that a following
4519 // gogo will restore pc/sp/bp.
4520 //
4521 // save must not have write barriers because invoking a write barrier can
4522 // grow the stack.
4523 //
4524 //go:nosplit
4525 //go:nowritebarrierrec
4526 func save(pc, sp, bp uintptr) {
4527 gp := getg()
4528
4529 if gp == gp.m.g0 || gp == gp.m.gsignal {
4530
4531
4532
4533
4534
4535 throw("save on system g not allowed")
4536 }
4537
4538 gp.sched.pc = pc
4539 gp.sched.sp = sp
4540 gp.sched.lr = 0
4541 gp.sched.bp = bp
4542
4543
4544
4545 if gp.sched.ctxt != nil {
4546 badctxt()
4547 }
4548 }
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564 // reentersyscall is the implementation of entersyscall: the goroutine is
4565 // about to enter a system call, so record that it's no longer using the
4566 // cpu. It is called only from the go syscall library and cgocall, not from
4567 // the low-level system calls used by the runtime.
4568 //
4569 // reentersyscall must not split the stack: save must make g.sched refer to
4570 // the caller's stack, because reentersyscall is going to return immediately
4571 // after. Nothing it calls may split the stack either; setting throwsplit
4572 // and stackguard0 below makes any accidental split fail loudly.
4573 //go:nosplit
4574 func reentersyscall(pc, sp, bp uintptr) {
4575 trace := traceAcquire()
4576 gp := getg()
4577
4578
4579
4580 gp.m.locks++
4581
4582
4583
4584
4585
4586 gp.stackguard0 = stackPreempt
4587 gp.throwsplit = true
4588
4589
4590 save(pc, sp, bp)
4591 gp.syscallsp = sp
4592 gp.syscallpc = pc
4593 gp.syscallbp = bp
4594 casgstatus(gp, _Grunning, _Gsyscall)
4595 if staticLockRanking {
4596
4597
4598 save(pc, sp, bp)
4599 }
4600 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4601 systemstack(func() {
4602 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4603 throw("entersyscall")
4604 })
4605 }
4606 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4607 systemstack(func() {
4608 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4609 throw("entersyscall")
4610 })
4611 }
4612
4613 if trace.ok() {
4614 systemstack(func() {
4615 trace.GoSysCall()
4616 traceRelease(trace)
4617 })
4618
4619
4620
4621 save(pc, sp, bp)
4622 }
4623
4624 if sched.sysmonwait.Load() {
4625 systemstack(entersyscall_sysmon)
4626 save(pc, sp, bp)
4627 }
4628
4629 if gp.m.p.ptr().runSafePointFn != 0 {
4630
4631 systemstack(runSafePointFn)
4632 save(pc, sp, bp)
4633 }
4634
4635 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4636 pp := gp.m.p.ptr()
4637 pp.m = 0
4638 gp.m.oldp.set(pp)
4639 gp.m.p = 0
4640 atomic.Store(&pp.status, _Psyscall)
4641 if sched.gcwaiting.Load() {
4642 systemstack(entersyscall_gcwait)
4643 save(pc, sp, bp)
4644 }
4645
4646 gp.m.locks--
4647 }
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659 // entersyscall is the standard syscall entry used by the go syscall
4660 // library and normal cgo calls. It must not split the stack, so it
4661 // forwards its caller's pc/sp/bp straight to reentersyscall.
4662 //go:nosplit
4663 func entersyscall() {
4664
4665
4666
4667
4668 fp := getcallerfp()
4669 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4670 }
4671
4672 func entersyscall_sysmon() {
4673 lock(&sched.lock)
4674 if sched.sysmonwait.Load() {
4675 sched.sysmonwait.Store(false)
4676 notewakeup(&sched.sysmonnote)
4677 }
4678 unlock(&sched.lock)
4679 }
4680
4681 func entersyscall_gcwait() {
4682 gp := getg()
4683 pp := gp.m.oldp.ptr()
4684
4685 lock(&sched.lock)
4686 trace := traceAcquire()
4687 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4688 if trace.ok() {
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698 trace.ProcSteal(pp, true)
4699 traceRelease(trace)
4700 }
4701 pp.gcStopTime = nanotime()
4702 pp.syscalltick++
4703 if sched.stopwait--; sched.stopwait == 0 {
4704 notewakeup(&sched.stopnote)
4705 }
4706 } else if trace.ok() {
4707 traceRelease(trace)
4708 }
4709 unlock(&sched.lock)
4710 }
4711
4712
4713
4714
4715
4716
4717
4718
4719 // entersyscallblock is the same as entersyscall, but with a hint that the
4720 // syscall is blocking. The P is handed off immediately instead of being
4721 // left in _Psyscall for sysmon to retake.
4722 //
4723 //go:nosplit
4724 func entersyscallblock() {
4725 gp := getg()
4726
4727 gp.m.locks++
4728 gp.throwsplit = true
4729 gp.stackguard0 = stackPreempt
4730 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4731 gp.m.p.ptr().syscalltick++
4732
4733
4734 pc := sys.GetCallerPC()
4735 sp := sys.GetCallerSP()
4736 bp := getcallerfp()
4737 save(pc, sp, bp)
4738 gp.syscallsp = gp.sched.sp
4739 gp.syscallpc = gp.sched.pc
4740 gp.syscallbp = gp.sched.bp
4741 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4742 sp1 := sp
4743 sp2 := gp.sched.sp
4744 sp3 := gp.syscallsp
4745 systemstack(func() {
4746 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4747 throw("entersyscallblock")
4748 })
4749 }
4750 casgstatus(gp, _Grunning, _Gsyscall)
4751 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4752 systemstack(func() {
4753 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4754 throw("entersyscallblock")
4755 })
4756 }
4757 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4758 systemstack(func() {
4759 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4760 throw("entersyscallblock")
4761 })
4762 }
4763
4764 systemstack(entersyscallblock_handoff)
4765
4766
4767 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4768
4769 gp.m.locks--
4770 }
4771
4772 func entersyscallblock_handoff() {
4773 trace := traceAcquire()
4774 if trace.ok() {
4775 trace.GoSysCall()
4776 traceRelease(trace)
4777 }
4778 handoffp(releasep())
4779 }
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792 // exitsyscall is called when the goroutine g exited its system call, to
4793 // arrange for it to run on a cpu again. It is called only from the go
4794 // syscall library, not from the low-level system calls used by the runtime.
4795 // The fast path reacquires the old P or grabs an idle one; otherwise the
4796 // slow path (exitsyscall0) runs on g0 via mcall.
4797 //
4798 // Write barriers are not allowed because our P may have been stolen.
4799 //go:nosplit
4800 //go:nowritebarrierrec
4801 func exitsyscall() {
4802 gp := getg()
4803
4804 gp.m.locks++
4805 if sys.GetCallerSP() > gp.syscallsp {
4806 throw("exitsyscall: syscall frame is no longer valid")
4807 }
4808
4809 gp.waitsince = 0
4810 oldp := gp.m.oldp.ptr()
4811 gp.m.oldp = 0
4812 if exitsyscallfast(oldp) {
4813
4814
4815 if goroutineProfile.active {
4816
4817
4818
4819 systemstack(func() {
4820 tryRecordGoroutineProfileWB(gp)
4821 })
4822 }
4823 trace := traceAcquire()
4824 if trace.ok() {
4825 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4826 systemstack(func() {
4827
4828
4829
4830
4831 trace.GoSysExit(lostP)
4832 if lostP {
4833
4834
4835
4836
4837 trace.GoStart()
4838 }
4839 })
4840 }
4841
4842 gp.m.p.ptr().syscalltick++
4843
4844 casgstatus(gp, _Gsyscall, _Grunning)
4845 if trace.ok() {
4846 traceRelease(trace)
4847 }
4848
4849
4850
4851 gp.syscallsp = 0
4852 gp.m.locks--
4853 if gp.preempt {
4854
4855 gp.stackguard0 = stackPreempt
4856 } else {
4857
4858 gp.stackguard0 = gp.stack.lo + stackGuard
4859 }
4860 gp.throwsplit = false
4861
4862 if sched.disable.user && !schedEnabled(gp) {
4863
4864 Gosched()
4865 }
4866
4867 return
4868 }
4869
4870 gp.m.locks--
4871
4872
4873 mcall(exitsyscall0)
4874
4875
4876
4877
4878
4879
4880
4881 gp.syscallsp = 0
4882 gp.m.p.ptr().syscalltick++
4883 gp.throwsplit = false
4884 }
4885
4886 //go:nosplit
4887 func exitsyscallfast(oldp *p) bool {
4888
4889 if sched.stopwait == freezeStopWait {
4890 return false
4891 }
4892
4893
4894 trace := traceAcquire()
4895 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4896
4897 wirep(oldp)
4898 exitsyscallfast_reacquired(trace)
4899 if trace.ok() {
4900 traceRelease(trace)
4901 }
4902 return true
4903 }
4904 if trace.ok() {
4905 traceRelease(trace)
4906 }
4907
4908
4909 if sched.pidle != 0 {
4910 var ok bool
4911 systemstack(func() {
4912 ok = exitsyscallfast_pidle()
4913 })
4914 if ok {
4915 return true
4916 }
4917 }
4918 return false
4919 }
4920
4921
4922
4923
4924
4925
4926 func exitsyscallfast_reacquired(trace traceLocker) {
4927 gp := getg()
4928 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4929 if trace.ok() {
4930
4931
4932
4933 systemstack(func() {
4934
4935
4936 trace.ProcSteal(gp.m.p.ptr(), true)
4937 trace.ProcStart()
4938 })
4939 }
4940 gp.m.p.ptr().syscalltick++
4941 }
4942 }
4943
4944 func exitsyscallfast_pidle() bool {
4945 lock(&sched.lock)
4946 pp, _ := pidleget(0)
4947 if pp != nil && sched.sysmonwait.Load() {
4948 sched.sysmonwait.Store(false)
4949 notewakeup(&sched.sysmonnote)
4950 }
4951 unlock(&sched.lock)
4952 if pp != nil {
4953 acquirep(pp)
4954 return true
4955 }
4956 return false
4957 }
4958
4959
4960 // exitsyscall0 is the slow path of exitsyscall, run on g0: we failed to
4961 // get a P on the fast path, so enqueue gp as runnable and look for work.
4962 //
4963 // Called via mcall, so gp is the calling g from this M.
4964 //go:nowritebarrierrec
4965 func exitsyscall0(gp *g) {
4966 var trace traceLocker
4967 traceExitingSyscall()
4968 trace = traceAcquire()
4969 casgstatus(gp, _Gsyscall, _Grunnable)
4970 traceExitedSyscall()
4971 if trace.ok() {
4972
4973
4974
4975
4976 trace.GoSysExit(true)
4977 traceRelease(trace)
4978 }
4979 dropg()
4980 lock(&sched.lock)
4981 var pp *p
4982 if schedEnabled(gp) {
4983 pp, _ = pidleget(0)
4984 }
4985 var locked bool
4986 if pp == nil {
4987 globrunqput(gp)
4988
4989
4990
4991
4992
4993
4994 locked = gp.lockedm != 0
4995 } else if sched.sysmonwait.Load() {
4996 sched.sysmonwait.Store(false)
4997 notewakeup(&sched.sysmonnote)
4998 }
4999 unlock(&sched.lock)
5000 if pp != nil {
5001 acquirep(pp)
5002 execute(gp, false)
5003 }
5004 if locked {
5005
5006
5007
5008
5009 stoplockedm()
5010 execute(gp, false)
5011 }
5012 stopm()
5013 schedule()
5014 }
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028 func syscall_runtime_BeforeFork() {
5029 gp := getg().m.curg
5030
5031
5032
5033
5034 gp.m.locks++
5035 sigsave(&gp.m.sigmask)
5036 sigblock(false)
5037
5038
5039
5040
5041
5042 gp.stackguard0 = stackFork
5043 }
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057 func syscall_runtime_AfterFork() {
5058 gp := getg().m.curg
5059
5060
5061 gp.stackguard0 = gp.stack.lo + stackGuard
5062
5063 msigrestore(gp.m.sigmask)
5064
5065 gp.m.locks--
5066 }
5067
5068
5069
5070 var inForkedChild bool
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091 func syscall_runtime_AfterForkInChild() {
5092
5093
5094
5095
5096 inForkedChild = true
5097
5098 clearSignalHandlers()
5099
5100
5101
5102 msigrestore(getg().m.sigmask)
5103
5104 inForkedChild = false
5105 }
5106
5107
5108
5109
5110 var pendingPreemptSignals atomic.Int32
5111
5112
5113
5114
5115 func syscall_runtime_BeforeExec() {
5116
5117 execLock.lock()
5118
5119
5120
5121 if GOOS == "darwin" || GOOS == "ios" {
5122 for pendingPreemptSignals.Load() > 0 {
5123 osyield()
5124 }
5125 }
5126 }
5127
5128
5129
5130
5131 func syscall_runtime_AfterExec() {
5132 execLock.unlock()
5133 }
5134
5135 // malg allocates a new g, with a stack big enough for stacksize bytes.
5136 func malg(stacksize int32) *g {
5137 newg := new(g)
5138 if stacksize >= 0 {
5139 stacksize = round2(stackSystem + stacksize)
5140 systemstack(func() {
5141 newg.stack = stackalloc(uint32(stacksize))
5142 if valgrindenabled {
5143 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5144 }
5145 })
5146 newg.stackguard0 = newg.stack.lo + stackGuard
5147 newg.stackguard1 = ^uintptr(0)
5148
5149
5150 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5151 }
5152 return newg
5153 }
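// For example (illustrative values, assuming stackSystem is 0 on the target
// platform): malg(8192) rounds the request with round2 and allocates an
// 8 KiB stack, then places the guard at stack.lo+stackGuard. A negative
// stacksize skips stack allocation entirely, for callers whose stack is
// provided by the OS or C side.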
5154
5155 // newproc creates a new g running fn.
5156 // Put it on the queue of g's waiting to run.
5157 // The compiler turns a go statement into a call to this.
5158 func newproc(fn *funcval) {
5159 gp := getg()
5160 pc := sys.GetCallerPC()
5161 systemstack(func() {
5162 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5163
5164 pp := getg().m.p.ptr()
5165 runqput(pp, newg, true)
5166
5167 if mainStarted {
5168 wakep()
5169 }
5170 })
5171 }
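// Illustration (simplified, assumed): the compiler lowers
//
//	go f(x)
//
// into a closure capturing x plus a call to newproc with that closure's
// funcval, so by the time newproc runs there are no arguments to copy;
// newproc1 builds the g, and runqput(pp, newg, true) makes it the P's
// next G to run.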
5172
5173 // newproc1 creates a new g in state _Grunnable (or _Gwaiting if parked is
5174 // true), starting at fn. callerpc is the address of the go statement that
5175 // created this. The caller is responsible for adding the new g to the scheduler.
5176 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5177 if fn == nil {
5178 fatal("go of nil func value")
5179 }
5180
5181 mp := acquirem()
5182 pp := mp.p.ptr()
5183 newg := gfget(pp)
5184 if newg == nil {
5185 newg = malg(stackMin)
5186 casgstatus(newg, _Gidle, _Gdead)
5187 allgadd(newg)
5188 }
5189 if newg.stack.hi == 0 {
5190 throw("newproc1: newg missing stack")
5191 }
5192
5193 if readgstatus(newg) != _Gdead {
5194 throw("newproc1: new g is not Gdead")
5195 }
5196
5197 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5198 totalSize = alignUp(totalSize, sys.StackAlign)
5199 sp := newg.stack.hi - totalSize
5200 if usesLR {
5201
5202 *(*uintptr)(unsafe.Pointer(sp)) = 0
5203 prepGoExitFrame(sp)
5204 }
5205 if GOARCH == "arm64" {
5206
5207 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5208 }
5209
5210 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5211 newg.sched.sp = sp
5212 newg.stktopsp = sp
5213 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5214 newg.sched.g = guintptr(unsafe.Pointer(newg))
5215 gostartcallfn(&newg.sched, fn)
5216 newg.parentGoid = callergp.goid
5217 newg.gopc = callerpc
5218 newg.ancestors = saveAncestors(callergp)
5219 newg.startpc = fn.fn
5220 newg.runningCleanups.Store(false)
5221 if isSystemGoroutine(newg, false) {
5222 sched.ngsys.Add(1)
5223 } else {
5224
5225 newg.bubble = callergp.bubble
5226 if mp.curg != nil {
5227 newg.labels = mp.curg.labels
5228 }
5229 if goroutineProfile.active {
5230
5231
5232
5233
5234
5235 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5236 }
5237 }
5238
5239 newg.trackingSeq = uint8(cheaprand())
5240 if newg.trackingSeq%gTrackingPeriod == 0 {
5241 newg.tracking = true
5242 }
5243 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5244
5245
5246 trace := traceAcquire()
5247 var status uint32 = _Grunnable
5248 if parked {
5249 status = _Gwaiting
5250 newg.waitreason = waitreason
5251 }
5252 if pp.goidcache == pp.goidcacheend {
5253
5254
5255
5256 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5257 pp.goidcache -= _GoidCacheBatch - 1
5258 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5259 }
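// (Note: goids are handed out from a per-P cache refilled in batches of
// _GoidCacheBatch from the shared sched.goidgen counter, so the atomic
// add above runs once per batch rather than once per goroutine.)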
5260 newg.goid = pp.goidcache
5261 casgstatus(newg, _Gdead, status)
5262 pp.goidcache++
5263 newg.trace.reset()
5264 if trace.ok() {
5265 trace.GoCreate(newg, newg.startpc, parked)
5266 traceRelease(trace)
5267 }
5268
5269
5270 if raceenabled {
5271 newg.racectx = racegostart(callerpc)
5272 newg.raceignore = 0
5273 if newg.labels != nil {
5274
5275
5276 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5277 }
5278 }
5279 releasem(mp)
5280
5281 return newg
5282 }
5283
5284 // saveAncestors copies previous ancestors of the given caller g and
5285 // includes info for the current caller into a new set of tracebacks for
5286 // a g being created.
5287 func saveAncestors(callergp *g) *[]ancestorInfo {
5288
5289 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5290 return nil
5291 }
5292 var callerAncestors []ancestorInfo
5293 if callergp.ancestors != nil {
5294 callerAncestors = *callergp.ancestors
5295 }
5296 n := int32(len(callerAncestors)) + 1
5297 if n > debug.tracebackancestors {
5298 n = debug.tracebackancestors
5299 }
5300 ancestors := make([]ancestorInfo, n)
5301 copy(ancestors[1:], callerAncestors)
5302
5303 var pcs [tracebackInnerFrames]uintptr
5304 npcs := gcallers(callergp, 0, pcs[:])
5305 ipcs := make([]uintptr, npcs)
5306 copy(ipcs, pcs[:])
5307 ancestors[0] = ancestorInfo{
5308 pcs: ipcs,
5309 goid: callergp.goid,
5310 gopc: callergp.gopc,
5311 }
5312
5313 ancestorsp := new([]ancestorInfo)
5314 *ancestorsp = ancestors
5315 return ancestorsp
5316 }
5317
5318 // gfput puts gp on the free list.
5319 // If the local list is too long, it transfers a batch to the global list.
5320 func gfput(pp *p, gp *g) {
5321 if readgstatus(gp) != _Gdead {
5322 throw("gfput: bad status (not Gdead)")
5323 }
5324
5325 stksize := gp.stack.hi - gp.stack.lo
5326
5327 if stksize != uintptr(startingStackSize) {
5328
5329 stackfree(gp.stack)
5330 gp.stack.lo = 0
5331 gp.stack.hi = 0
5332 gp.stackguard0 = 0
5333 if valgrindenabled {
5334 valgrindDeregisterStack(gp.valgrindStackID)
5335 gp.valgrindStackID = 0
5336 }
5337 }
5338
5339 pp.gFree.push(gp)
5340 if pp.gFree.size >= 64 {
5341 var (
5342 stackQ gQueue
5343 noStackQ gQueue
5344 )
5345 for pp.gFree.size >= 32 {
5346 gp := pp.gFree.pop()
5347 if gp.stack.lo == 0 {
5348 noStackQ.push(gp)
5349 } else {
5350 stackQ.push(gp)
5351 }
5352 }
5353 lock(&sched.gFree.lock)
5354 sched.gFree.noStack.pushAll(noStackQ)
5355 sched.gFree.stack.pushAll(stackQ)
5356 unlock(&sched.gFree.lock)
5357 }
5358 }
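// The thresholds above give the per-P free list hysteresis: once a P has
// accumulated 64 free Gs, it drains down to 32, moving the surplus to the
// global free lists (split by whether each G still owns a stack) in one
// critical section. (Explanatory note.)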
5359
5360 // gfget gets a g from the free list.
5361 // If the local list is empty, it grabs a batch from the global list.
5362 func gfget(pp *p) *g {
5363 retry:
5364 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5365 lock(&sched.gFree.lock)
5366
5367 for pp.gFree.size < 32 {
5368
5369 gp := sched.gFree.stack.pop()
5370 if gp == nil {
5371 gp = sched.gFree.noStack.pop()
5372 if gp == nil {
5373 break
5374 }
5375 }
5376 pp.gFree.push(gp)
5377 }
5378 unlock(&sched.gFree.lock)
5379 goto retry
5380 }
5381 gp := pp.gFree.pop()
5382 if gp == nil {
5383 return nil
5384 }
5385 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5386
5387
5388
5389 systemstack(func() {
5390 stackfree(gp.stack)
5391 gp.stack.lo = 0
5392 gp.stack.hi = 0
5393 gp.stackguard0 = 0
5394 if valgrindenabled {
5395 valgrindDeregisterStack(gp.valgrindStackID)
5396 gp.valgrindStackID = 0
5397 }
5398 })
5399 }
5400 if gp.stack.lo == 0 {
5401
5402 systemstack(func() {
5403 gp.stack = stackalloc(startingStackSize)
5404 if valgrindenabled {
5405 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5406 }
5407 })
5408 gp.stackguard0 = gp.stack.lo + stackGuard
5409 } else {
5410 if raceenabled {
5411 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5412 }
5413 if msanenabled {
5414 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5415 }
5416 if asanenabled {
5417 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5418 }
5419 }
5420 return gp
5421 }
5422
5423 // gfpurge purges all cached G's from the local free list to the global list.
5424 func gfpurge(pp *p) {
5425 var (
5426 stackQ gQueue
5427 noStackQ gQueue
5428 )
5429 for !pp.gFree.empty() {
5430 gp := pp.gFree.pop()
5431 if gp.stack.lo == 0 {
5432 noStackQ.push(gp)
5433 } else {
5434 stackQ.push(gp)
5435 }
5436 }
5437 lock(&sched.gFree.lock)
5438 sched.gFree.noStack.pushAll(noStackQ)
5439 sched.gFree.stack.pushAll(stackQ)
5440 unlock(&sched.gFree.lock)
5441 }
5442
5443 // Breakpoint executes a breakpoint trap.
5444 func Breakpoint() {
5445 breakpoint()
5446 }
5447
5448
5449 // dolockOSThread is called by LockOSThread and lockOSThread below
5450 // after they modify m.locked. Do not allow preemption during this call,
5451 // or else the m might be different in this function than in the caller.
5452 //go:nosplit
5453 func dolockOSThread() {
5454 if GOARCH == "wasm" {
5455 return
5456 }
5457 gp := getg()
5458 gp.m.lockedg.set(gp)
5459 gp.lockedm.set(gp.m)
5460 }
5461
5462
5463
5464
5465
5466
5467 // LockOSThread wires the calling goroutine to its current operating system
5468 // thread. The calling goroutine will always execute in that thread, and no
5469 // other goroutine will execute in it, until the calling goroutine has made
5470 // as many calls to UnlockOSThread as to LockOSThread. If the calling
5471 // goroutine exits without unlocking the thread, the thread will be terminated.
5472 //
5473 // All init functions are run on the startup thread. Calling LockOSThread from
5474 // an init function will cause the main function to be invoked on that thread.
5475 //
5476 // A goroutine should call LockOSThread before calling OS services or non-Go
5477 // library functions that depend on per-thread state.
5478 func LockOSThread() {
5479 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5480
5481
5482
5483 startTemplateThread()
5484 }
5485 gp := getg()
5486 gp.m.lockedExt++
5487 if gp.m.lockedExt == 0 {
5488 gp.m.lockedExt--
5489 panic("LockOSThread nesting overflow")
5490 }
5491 dolockOSThread()
5492 }
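// Typical use (illustrative sketch, not part of the runtime): a goroutine
// that calls into a thread-affine C library pins itself for its lifetime:
//
//	go func() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		// ... per-thread work, e.g. C calls that use thread-local state ...
//	}()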
5493
5494 //go:nosplit
5495 func lockOSThread() {
5496 getg().m.lockedInt++
5497 dolockOSThread()
5498 }
5499
5500
5501 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
5502 // after they update m->locked. Do not allow preemption during this call,
5503 // or else the m might be different in this function than in the caller.
5504 //go:nosplit
5505 func dounlockOSThread() {
5506 if GOARCH == "wasm" {
5507 return
5508 }
5509 gp := getg()
5510 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5511 return
5512 }
5513 gp.m.lockedg = 0
5514 gp.lockedm = 0
5515 }
5516
5517
5518
5519
5520
5521
5522 // UnlockOSThread undoes an earlier call to LockOSThread.
5523 // If this drops the number of active LockOSThread calls on the calling
5524 // goroutine to zero, it unwires the calling goroutine from its fixed
5525 // operating system thread. If there are no active LockOSThread calls, this
5526 // is a no-op.
5527 //
5528 // Before calling UnlockOSThread, the caller must ensure that the OS thread
5529 // is suitable for running other goroutines; otherwise it should not call
5530 // this function and should leave the goroutine locked until it exits.
5531 func UnlockOSThread() {
5532 gp := getg()
5533 if gp.m.lockedExt == 0 {
5534 return
5535 }
5536 gp.m.lockedExt--
5537 dounlockOSThread()
5538 }
5539
5540 //go:nosplit
5541 func unlockOSThread() {
5542 gp := getg()
5543 if gp.m.lockedInt == 0 {
5544 systemstack(badunlockosthread)
5545 }
5546 gp.m.lockedInt--
5547 dounlockOSThread()
5548 }
5549
5550 func badunlockosthread() {
5551 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5552 }
5553
5554 func gcount() int32 {
5555 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size - sched.ngsys.Load()
5556 for _, pp := range allp {
5557 n -= pp.gFree.size
5558 }
5559
5560
5561
5562 if n < 1 {
5563 n = 1
5564 }
5565 return n
5566 }
5567
5568 func mcount() int32 {
5569 return int32(sched.mnext - sched.nmfreed)
5570 }
5571
5572 var prof struct {
5573 signalLock atomic.Uint32
5574
5575
5576
5577 hz atomic.Int32
5578 }
5579
5580 func _System() { _System() }
5581 func _ExternalCode() { _ExternalCode() }
5582 func _LostExternalCode() { _LostExternalCode() }
5583 func _GC() { _GC() }
5584 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5585 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5586 func _VDSO() { _VDSO() }
5587
5588
5589
5590 // sigprof is called if we receive a SIGPROF signal.
5591 // It is called by the signal handler and may run during STW.
5592 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5593 if prof.hz.Load() == 0 {
5594 return
5595 }
5596
5597
5598
5599
5600 if mp != nil && mp.profilehz == 0 {
5601 return
5602 }
5603
5604
5605
5606
5607
5608
5609
5610 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5611 if f := findfunc(pc); f.valid() {
5612 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5613 cpuprof.lostAtomic++
5614 return
5615 }
5616 }
5617 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5618
5619
5620
5621 cpuprof.lostAtomic++
5622 return
5623 }
5624 }
5625
5626
5627
5628
5629
5630
5631
5632 getg().m.mallocing++
5633
5634 var u unwinder
5635 var stk [maxCPUProfStack]uintptr
5636 n := 0
5637 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5638 cgoOff := 0
5639
5640
5641
5642
5643
5644 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5645 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5646 cgoOff++
5647 }
5648 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5649 mp.cgoCallers[0] = 0
5650 }
5651
5652
5653 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5654 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5655
5656
5657 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5658 } else if mp != nil && mp.vdsoSP != 0 {
5659
5660
5661 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5662 } else {
5663 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5664 }
5665 n += tracebackPCs(&u, 0, stk[n:])
5666
5667 if n <= 0 {
5668
5669
5670 n = 2
5671 if inVDSOPage(pc) {
5672 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5673 } else if pc > firstmoduledata.etext {
5674
5675 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5676 }
5677 stk[0] = pc
5678 if mp.preemptoff != "" {
5679 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5680 } else {
5681 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5682 }
5683 }
5684
5685 if prof.hz.Load() != 0 {
5686
5687
5688
5689 var tagPtr *unsafe.Pointer
5690 if gp != nil && gp.m != nil && gp.m.curg != nil {
5691 tagPtr = &gp.m.curg.labels
5692 }
5693 cpuprof.add(tagPtr, stk[:n])
5694
5695 gprof := gp
5696 var mp *m
5697 var pp *p
5698 if gp != nil && gp.m != nil {
5699 if gp.m.curg != nil {
5700 gprof = gp.m.curg
5701 }
5702 mp = gp.m
5703 pp = gp.m.p.ptr()
5704 }
5705 traceCPUSample(gprof, mp, pp, stk[:n])
5706 }
5707 getg().m.mallocing--
5708 }
5709
5710 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
5711 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
5712 func setcpuprofilerate(hz int32) {
5713
5714 if hz < 0 {
5715 hz = 0
5716 }
5717
5718
5719
5720 gp := getg()
5721 gp.m.locks++
5722
5723
5724
5725
5726 setThreadCPUProfiler(0)
5727
5728 for !prof.signalLock.CompareAndSwap(0, 1) {
5729 osyield()
5730 }
5731 if prof.hz.Load() != hz {
5732 setProcessCPUProfiler(hz)
5733 prof.hz.Store(hz)
5734 }
5735 prof.signalLock.Store(0)
5736
5737 lock(&sched.lock)
5738 sched.profilehz = hz
5739 unlock(&sched.lock)
5740
5741 if hz != 0 {
5742 setThreadCPUProfiler(hz)
5743 }
5744
5745 gp.m.locks--
5746 }
5747
5748 // init initializes pp, which may be a freshly allocated p or a
5749 // previously destroyed p, and transitions it to status _Pgcstop.
5750 func (pp *p) init(id int32) {
5751 pp.id = id
5752 pp.status = _Pgcstop
5753 pp.sudogcache = pp.sudogbuf[:0]
5754 pp.deferpool = pp.deferpoolbuf[:0]
5755 pp.wbBuf.reset()
5756 if pp.mcache == nil {
5757 if id == 0 {
5758 if mcache0 == nil {
5759 throw("missing mcache?")
5760 }
5761
5762
5763 pp.mcache = mcache0
5764 } else {
5765 pp.mcache = allocmcache()
5766 }
5767 }
5768 if raceenabled && pp.raceprocctx == 0 {
5769 if id == 0 {
5770 pp.raceprocctx = raceprocctx0
5771 raceprocctx0 = 0
5772 } else {
5773 pp.raceprocctx = raceproccreate()
5774 }
5775 }
5776 lockInit(&pp.timers.mu, lockRankTimers)
5777
5778
5779
5780 timerpMask.set(id)
5781
5782
5783 idlepMask.clear(id)
5784 }
5785
5786
5787 // destroy releases all of the resources associated with pp and
5788 // transitions it to status _Pdead.
5789 // sched.lock must be held and the world must be stopped.
5790 func (pp *p) destroy() {
5791 assertLockHeld(&sched.lock)
5792 assertWorldStopped()
5793
5794
5795 for pp.runqhead != pp.runqtail {
5796
5797 pp.runqtail--
5798 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5799
5800 globrunqputhead(gp)
5801 }
5802 if pp.runnext != 0 {
5803 globrunqputhead(pp.runnext.ptr())
5804 pp.runnext = 0
5805 }
5806
5807
5808 getg().m.p.ptr().timers.take(&pp.timers)
5809
5810
5811 if gcphase != _GCoff {
5812 wbBufFlush1(pp)
5813 pp.gcw.dispose()
5814 }
5815 clear(pp.sudogbuf[:])
5816 pp.sudogcache = pp.sudogbuf[:0]
5817 pp.pinnerCache = nil
5818 clear(pp.deferpoolbuf[:])
5819 pp.deferpool = pp.deferpoolbuf[:0]
5820 systemstack(func() {
5821 for i := 0; i < pp.mspancache.len; i++ {
5822
5823 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5824 }
5825 pp.mspancache.len = 0
5826 lock(&mheap_.lock)
5827 pp.pcache.flush(&mheap_.pages)
5828 unlock(&mheap_.lock)
5829 })
5830 freemcache(pp.mcache)
5831 pp.mcache = nil
5832 gfpurge(pp)
5833 if raceenabled {
5834 if pp.timers.raceCtx != 0 {
5835
5836
5837
5838
5839
5840 mp := getg().m
5841 phold := mp.p.ptr()
5842 mp.p.set(pp)
5843
5844 racectxend(pp.timers.raceCtx)
5845 pp.timers.raceCtx = 0
5846
5847 mp.p.set(phold)
5848 }
5849 raceprocdestroy(pp.raceprocctx)
5850 pp.raceprocctx = 0
5851 }
5852 pp.gcAssistTime = 0
5853 gcCleanups.queued += pp.cleanupsQueued
5854 pp.cleanupsQueued = 0
5855 pp.status = _Pdead
5856 }
5857
5858
5859 // procresize changes the number of processors.
5860 //
5861 // sched.lock must be held, and the world must be stopped.
5862 //
5863 // gcworkbufs must not be being modified by either the GC or the write
5864 // barrier code, so the GC must not be running if the number of Ps changes.
5865 // Returns the list of Ps with local work; the caller must schedule them.
5866 func procresize(nprocs int32) *p {
5867 assertLockHeld(&sched.lock)
5868 assertWorldStopped()
5869
5870 old := gomaxprocs
5871 if old < 0 || nprocs <= 0 {
5872 throw("procresize: invalid arg")
5873 }
5874 trace := traceAcquire()
5875 if trace.ok() {
5876 trace.Gomaxprocs(nprocs)
5877 traceRelease(trace)
5878 }
5879
5880
5881 now := nanotime()
5882 if sched.procresizetime != 0 {
5883 sched.totaltime += int64(old) * (now - sched.procresizetime)
5884 }
5885 sched.procresizetime = now
5886
5887 maskWords := (nprocs + 31) / 32
5888
5889
5890 if nprocs > int32(len(allp)) {
5891
5892
5893 lock(&allpLock)
5894 if nprocs <= int32(cap(allp)) {
5895 allp = allp[:nprocs]
5896 } else {
5897 nallp := make([]*p, nprocs)
5898
5899
5900 copy(nallp, allp[:cap(allp)])
5901 allp = nallp
5902 }
5903
5904 if maskWords <= int32(cap(idlepMask)) {
5905 idlepMask = idlepMask[:maskWords]
5906 timerpMask = timerpMask[:maskWords]
5907 } else {
5908 nidlepMask := make([]uint32, maskWords)
5909
5910 copy(nidlepMask, idlepMask)
5911 idlepMask = nidlepMask
5912
5913 ntimerpMask := make([]uint32, maskWords)
5914 copy(ntimerpMask, timerpMask)
5915 timerpMask = ntimerpMask
5916 }
5917 unlock(&allpLock)
5918 }
5919
5920
5921 for i := old; i < nprocs; i++ {
5922 pp := allp[i]
5923 if pp == nil {
5924 pp = new(p)
5925 }
5926 pp.init(i)
5927 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5928 }
5929
5930 gp := getg()
5931 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5932
5933 gp.m.p.ptr().status = _Prunning
5934 gp.m.p.ptr().mcache.prepareForSweep()
5935 } else {
5936
5937
5938
5939
5940
5941 if gp.m.p != 0 {
5942 trace := traceAcquire()
5943 if trace.ok() {
5944
5945
5946
5947 trace.GoSched()
5948 trace.ProcStop(gp.m.p.ptr())
5949 traceRelease(trace)
5950 }
5951 gp.m.p.ptr().m = 0
5952 }
5953 gp.m.p = 0
5954 pp := allp[0]
5955 pp.m = 0
5956 pp.status = _Pidle
5957 acquirep(pp)
5958 trace := traceAcquire()
5959 if trace.ok() {
5960 trace.GoStart()
5961 traceRelease(trace)
5962 }
5963 }
5964
5965
5966 mcache0 = nil
5967
5968
5969 for i := nprocs; i < old; i++ {
5970 pp := allp[i]
5971 pp.destroy()
5972
5973 }
5974
5975
5976 if int32(len(allp)) != nprocs {
5977 lock(&allpLock)
5978 allp = allp[:nprocs]
5979 idlepMask = idlepMask[:maskWords]
5980 timerpMask = timerpMask[:maskWords]
5981 unlock(&allpLock)
5982 }
5983
5984 var runnablePs *p
5985 for i := nprocs - 1; i >= 0; i-- {
5986 pp := allp[i]
5987 if gp.m.p.ptr() == pp {
5988 continue
5989 }
5990 pp.status = _Pidle
5991 if runqempty(pp) {
5992 pidleput(pp, now)
5993 } else {
5994 pp.m.set(mget())
5995 pp.link.set(runnablePs)
5996 runnablePs = pp
5997 }
5998 }
5999 stealOrder.reset(uint32(nprocs))
6000 var int32p *int32 = &gomaxprocs
6001 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6002 if old != nprocs {
6003
6004 gcCPULimiter.resetCapacity(now, nprocs)
6005 }
6006 return runnablePs
6007 }
6008
6009
6010 // acquirep associates p and the current m.
6011 //
6012 // This function is allowed to have write barriers even if the caller
6013 // isn't because it immediately acquires pp.
6014 //go:yeswritebarrierrec
6015 func acquirep(pp *p) {
6016
6017 wirep(pp)
6018
6019
6020
6021
6022
6023 pp.mcache.prepareForSweep()
6024
6025 trace := traceAcquire()
6026 if trace.ok() {
6027 trace.ProcStart()
6028 traceRelease(trace)
6029 }
6030 }
6031
6032
6033 // wirep is the first step of acquirep, which actually associates the
6034 // current M to pp. This is broken out so we can disallow write barriers
6035 // for this part, since we don't yet have a P.
6036 //go:nowritebarrierrec
6037 //go:nosplit
6038 func wirep(pp *p) {
6039 gp := getg()
6040
6041 if gp.m.p != 0 {
6042
6043
6044 systemstack(func() {
6045 throw("wirep: already in go")
6046 })
6047 }
6048 if pp.m != 0 || pp.status != _Pidle {
6049
6050
6051 systemstack(func() {
6052 id := int64(0)
6053 if pp.m != 0 {
6054 id = pp.m.ptr().id
6055 }
6056 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6057 throw("wirep: invalid p state")
6058 })
6059 }
6060 gp.m.p.set(pp)
6061 pp.m.set(gp.m)
6062 pp.status = _Prunning
6063 }
6064
6065 // releasep disassociates p and the current m and returns the p.
6066 func releasep() *p {
6067 trace := traceAcquire()
6068 if trace.ok() {
6069 trace.ProcStop(getg().m.p.ptr())
6070 traceRelease(trace)
6071 }
6072 return releasepNoTrace()
6073 }
6074
6075 // releasepNoTrace is like releasep but doesn't emit a trace event.
6076 func releasepNoTrace() *p {
6077 gp := getg()
6078
6079 if gp.m.p == 0 {
6080 throw("releasep: invalid arg")
6081 }
6082 pp := gp.m.p.ptr()
6083 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6084 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6085 throw("releasep: invalid p state")
6086 }
6087 gp.m.p = 0
6088 pp.m = 0
6089 pp.status = _Pidle
6090 return pp
6091 }
6092
6093 func incidlelocked(v int32) {
6094 lock(&sched.lock)
6095 sched.nmidlelocked += v
6096 if v > 0 {
6097 checkdead()
6098 }
6099 unlock(&sched.lock)
6100 }
6101
6102 // checkdead checks for a deadlock situation: no running (non-system)
6103 // goroutines and no pending timers means the program is stuck.
6104 // The check is based on the number of running M's; sched.lock must be held.
6105 func checkdead() {
6106 assertLockHeld(&sched.lock)
6107
6108
6109
6110
6111
6112
6113 if (islibrary || isarchive) && GOARCH != "wasm" {
6114 return
6115 }
6116
6117
6118
6119
6120
6121 if panicking.Load() > 0 {
6122 return
6123 }
6124
6125
6126
6127
6128
6129 var run0 int32
6130 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6131 run0 = 1
6132 }
6133
6134 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6135 if run > run0 {
6136 return
6137 }
6138 if run < 0 {
6139 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6140 unlock(&sched.lock)
6141 throw("checkdead: inconsistent counts")
6142 }
6143
6144 grunning := 0
6145 forEachG(func(gp *g) {
6146 if isSystemGoroutine(gp, false) {
6147 return
6148 }
6149 s := readgstatus(gp)
6150 switch s &^ _Gscan {
6151 case _Gwaiting,
6152 _Gpreempted:
6153 grunning++
6154 case _Grunnable,
6155 _Grunning,
6156 _Gsyscall:
6157 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6158 unlock(&sched.lock)
6159 throw("checkdead: runnable g")
6160 }
6161 })
6162 if grunning == 0 {
6163 unlock(&sched.lock)
6164 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6165 }
6166
6167
6168 if faketime != 0 {
6169 if when := timeSleepUntil(); when < maxWhen {
6170 faketime = when
6171
6172
6173 pp, _ := pidleget(faketime)
6174 if pp == nil {
6175
6176
6177 unlock(&sched.lock)
6178 throw("checkdead: no p for timer")
6179 }
6180 mp := mget()
6181 if mp == nil {
6182
6183
6184 unlock(&sched.lock)
6185 throw("checkdead: no m for timer")
6186 }
6187
6188
6189
6190 sched.nmspinning.Add(1)
6191 mp.spinning = true
6192 mp.nextp.set(pp)
6193 notewakeup(&mp.park)
6194 return
6195 }
6196 }
6197
6198
6199 for _, pp := range allp {
6200 if len(pp.timers.heap) > 0 {
6201 return
6202 }
6203 }
6204
6205 unlock(&sched.lock)
6206 fatal("all goroutines are asleep - deadlock!")
6207 }
6208
6209
6210
6211 // forcegcperiod is the maximum time in nanoseconds between garbage
6212 // collections. If we go this long without a garbage collection, one
6213 // is forced to run.
6214 var forcegcperiod int64 = 2 * 60 * 1e9
6215
6216 // needSysmonWorkaround is true if the workaround for a NetBSD kernel
6217 // timer bug is needed (see the GOOS == "netbsd" block in sysmon).
6218 var needSysmonWorkaround bool = false
6219
6220 // haveSysmon indicates whether there is sysmon thread support.
6221 //
6222 // No threads on wasm yet, so no sysmon.
6223 const haveSysmon = GOARCH != "wasm"
6224
6225 // sysmon runs on a dedicated M, monitoring the runtime (netpoll, retake,
6226 // forced GC). Always runs without a P, so write barriers are not allowed.
6227 //go:nowritebarrierrec
6228 func sysmon() {
6229 lock(&sched.lock)
6230 sched.nmsys++
6231 checkdead()
6232 unlock(&sched.lock)
6233
6234 lastgomaxprocs := int64(0)
6235 lasttrace := int64(0)
6236 idle := 0
6237 delay := uint32(0)
6238
6239 for {
6240 if idle == 0 {
6241 delay = 20
6242 } else if idle > 50 {
6243 delay *= 2
6244 }
6245 if delay > 10*1000 {
6246 delay = 10 * 1000
6247 }
6248 usleep(delay)
6249
6250
6251
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265 now := nanotime()
6266 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6267 lock(&sched.lock)
6268 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6269 syscallWake := false
6270 next := timeSleepUntil()
6271 if next > now {
6272 sched.sysmonwait.Store(true)
6273 unlock(&sched.lock)
6274
6275
6276 sleep := forcegcperiod / 2
6277 if next-now < sleep {
6278 sleep = next - now
6279 }
6280 shouldRelax := sleep >= osRelaxMinNS
6281 if shouldRelax {
6282 osRelax(true)
6283 }
6284 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6285 if shouldRelax {
6286 osRelax(false)
6287 }
6288 lock(&sched.lock)
6289 sched.sysmonwait.Store(false)
6290 noteclear(&sched.sysmonnote)
6291 }
6292 if syscallWake {
6293 idle = 0
6294 delay = 20
6295 }
6296 }
6297 unlock(&sched.lock)
6298 }
6299
6300 lock(&sched.sysmonlock)
6301
6302
6303 now = nanotime()
6304
6305
6306 if *cgo_yield != nil {
6307 asmcgocall(*cgo_yield, nil)
6308 }
6309
6310 lastpoll := sched.lastpoll.Load()
6311 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6312 sched.lastpoll.CompareAndSwap(lastpoll, now)
6313 list, delta := netpoll(0)
6314 if !list.empty() {
6315
6316
6317
6318
6319
6320
6321
6322 incidlelocked(-1)
6323 injectglist(&list)
6324 incidlelocked(1)
6325 netpollAdjustWaiters(delta)
6326 }
6327 }
6328 if GOOS == "netbsd" && needSysmonWorkaround {
6329
6330
6331
6332
6333
6334
6335
6336
6337
6338
6339
6340
6341
6342
6343
6344 if next := timeSleepUntil(); next < now {
6345 startm(nil, false, false)
6346 }
6347 }
6348
6349 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6350 sysmonUpdateGOMAXPROCS()
6351 lastgomaxprocs = now
6352 }
6353 if scavenger.sysmonWake.Load() != 0 {
6354
6355 scavenger.wake()
6356 }
6357
6358
6359 if retake(now) != 0 {
6360 idle = 0
6361 } else {
6362 idle++
6363 }
6364
6365 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6366 lock(&forcegc.lock)
6367 forcegc.idle.Store(false)
6368 var list gList
6369 list.push(forcegc.g)
6370 injectglist(&list)
6371 unlock(&forcegc.lock)
6372 }
6373 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6374 lasttrace = now
6375 schedtrace(debug.scheddetail > 0)
6376 }
6377 unlock(&sched.sysmonlock)
6378 }
6379 }
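// Pacing note for the loop above: sysmon sleeps 20us per iteration while
// the runtime is busy; after more than 50 consecutive idle iterations the
// delay doubles each round, capped at 10ms (usleep takes microseconds).
// (Explanatory summary of the delay logic at the top of sysmon.)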
6380
6381 type sysmontick struct {
6382 schedtick uint32
6383 syscalltick uint32
6384 schedwhen int64
6385 syscallwhen int64
6386 }
6387
6388 // forcePreemptNS is the time slice given to a G before it is
6389 // preempted.
6390 const forcePreemptNS = 10 * 1000 * 1000
6391
6392 func retake(now int64) uint32 {
6393 n := 0
6394
6395
6396 lock(&allpLock)
6397
6398
6399
6400 for i := 0; i < len(allp); i++ {
6401 pp := allp[i]
6402 if pp == nil {
6403
6404
6405 continue
6406 }
6407 pd := &pp.sysmontick
6408 s := pp.status
6409 sysretake := false
6410 if s == _Prunning || s == _Psyscall {
6411
6412
6413
6414
6415 t := int64(pp.schedtick)
6416 if int64(pd.schedtick) != t {
6417 pd.schedtick = uint32(t)
6418 pd.schedwhen = now
6419 } else if pd.schedwhen+forcePreemptNS <= now {
6420 preemptone(pp)
6421
6422
6423 sysretake = true
6424 }
6425 }
6426 if s == _Psyscall {
6427
6428 t := int64(pp.syscalltick)
6429 if !sysretake && int64(pd.syscalltick) != t {
6430 pd.syscalltick = uint32(t)
6431 pd.syscallwhen = now
6432 continue
6433 }
6434
6435
6436
6437 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6438 continue
6439 }
6440
6441 unlock(&allpLock)
6442
6443
6444
6445
6446 incidlelocked(-1)
6447 trace := traceAcquire()
6448 if atomic.Cas(&pp.status, s, _Pidle) {
6449 if trace.ok() {
6450 trace.ProcSteal(pp, false)
6451 traceRelease(trace)
6452 }
6453 n++
6454 pp.syscalltick++
6455 handoffp(pp)
6456 } else if trace.ok() {
6457 traceRelease(trace)
6458 }
6459 incidlelocked(1)
6460 lock(&allpLock)
6461 }
6462 }
6463 unlock(&allpLock)
6464 return uint32(n)
6465 }
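// retake fires in two situations (summary of the logic above): a P in
// _Prunning or _Psyscall whose schedtick hasn't advanced for forcePreemptNS
// (10ms) gets a preemption request, and a P sitting in _Psyscall is handed
// off to another M via handoffp if the syscall has lasted over 10ms, or the
// P has local work, or there are no spinning or idle Ps to absorb work.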
6466
6467
6468 // preemptall tells all goroutines that they have been preempted and they
6469 // should stop. This function is purely best-effort. It can fail to inform
6470 // a goroutine if a processor just started running it. No locks need to be
6471 // held. Returns true if a preemption request was issued to at least one goroutine.
6472 func preemptall() bool {
6473 res := false
6474 for _, pp := range allp {
6475 if pp.status != _Prunning {
6476 continue
6477 }
6478 if preemptone(pp) {
6479 res = true
6480 }
6481 }
6482 return res
6483 }
6484
6485
6486 // preemptone tells the goroutine running on processor P to stop.
6487 //
6488 // This function is purely best-effort. It can incorrectly fail to inform the
6489 // goroutine. It can inform the wrong goroutine. Even if it informs the
6490 // correct goroutine, that goroutine might ignore the request if it is
6491 // simultaneously executing newstack. No lock needs to be held.
6492 //
6493 // Returns true if a preemption request was issued. The actual preemption will
6494 // happen at some point in the future and will be indicated by gp->status.
6495 func preemptone(pp *p) bool {
6496 mp := pp.m.ptr()
6497 if mp == nil || mp == getg().m {
6498 return false
6499 }
6500 gp := mp.curg
6501 if gp == nil || gp == mp.g0 {
6502 return false
6503 }
6504
6505 gp.preempt = true
6506
6507
6508
6509
6510
6511 gp.stackguard0 = stackPreempt
6512
6513
6514 if preemptMSupported && debug.asyncpreemptoff == 0 {
6515 pp.preempt = true
6516 preemptM(mp)
6517 }
6518
6519 return true
6520 }
6521
6522 var starttime int64
6523
6524 func schedtrace(detailed bool) {
6525 now := nanotime()
6526 if starttime == 0 {
6527 starttime = now
6528 }
6529
6530 lock(&sched.lock)
6531 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6532 if detailed {
6533 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6534 }
6535
6536
6537
6538 for i, pp := range allp {
6539 h := atomic.Load(&pp.runqhead)
6540 t := atomic.Load(&pp.runqtail)
6541 if detailed {
6542 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6543 mp := pp.m.ptr()
6544 if mp != nil {
6545 print(mp.id)
6546 } else {
6547 print("nil")
6548 }
6549 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6550 } else {
6551
6552
6553 print(" ")
6554 if i == 0 {
6555 print("[ ")
6556 }
6557 print(t - h)
6558 if i == len(allp)-1 {
6559 print(" ]")
6560 }
6561 }
6562 }
6563
6564 if !detailed {
6565
6566 print(" schedticks=[ ")
6567 for _, pp := range allp {
6568 print(pp.schedtick)
6569 print(" ")
6570 }
6571 print("]\n")
6572 }
6573
6574 if !detailed {
6575 unlock(&sched.lock)
6576 return
6577 }
6578
6579 for mp := allm; mp != nil; mp = mp.alllink {
6580 pp := mp.p.ptr()
6581 print(" M", mp.id, ": p=")
6582 if pp != nil {
6583 print(pp.id)
6584 } else {
6585 print("nil")
6586 }
6587 print(" curg=")
6588 if mp.curg != nil {
6589 print(mp.curg.goid)
6590 } else {
6591 print("nil")
6592 }
6593 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6594 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6595 print(lockedg.goid)
6596 } else {
6597 print("nil")
6598 }
6599 print("\n")
6600 }
6601
6602 forEachG(func(gp *g) {
6603 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6604 if gp.m != nil {
6605 print(gp.m.id)
6606 } else {
6607 print("nil")
6608 }
6609 print(" lockedm=")
6610 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6611 print(lockedm.id)
6612 } else {
6613 print("nil")
6614 }
6615 print("\n")
6616 })
6617 unlock(&sched.lock)
6618 }
6619
6620 type updateMaxProcsGState struct {
6621 lock mutex
6622 g *g
6623 idle atomic.Bool
6624
6625
6626 procs int32
6627 }
6628
6629 var (
6630
6631
6632 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6633
6634
6635
6636 updateMaxProcsG updateMaxProcsGState
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662
6663
6664
6665
6666
6667
6668
6669
6670
6671
6672
6673
6674
6675
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685 computeMaxProcsLock mutex
6686 )
6687
6688
6689
6690
6691 func defaultGOMAXPROCSUpdateEnable() {
6692 if debug.updatemaxprocs == 0 {
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702
6703
6704 updatemaxprocs.IncNonDefault()
6705 return
6706 }
6707
6708 go updateMaxProcsGoroutine()
6709 }
6710
6711 func updateMaxProcsGoroutine() {
6712 updateMaxProcsG.g = getg()
6713 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
6714 for {
6715 lock(&updateMaxProcsG.lock)
6716 if updateMaxProcsG.idle.Load() {
6717 throw("updateMaxProcsGoroutine: phase error")
6718 }
6719 updateMaxProcsG.idle.Store(true)
6720 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
6721
6722
6723 stw := stopTheWorldGC(stwGOMAXPROCS)
6724
6725
6726 lock(&sched.lock)
6727 custom := sched.customGOMAXPROCS
6728 unlock(&sched.lock)
6729 if custom {
6730 startTheWorldGC(stw)
6731 return
6732 }
6733
6734
6735
6736
6737
6738 newprocs = updateMaxProcsG.procs
6739 lock(&sched.lock)
6740 sched.customGOMAXPROCS = false
6741 unlock(&sched.lock)
6742
6743 startTheWorldGC(stw)
6744 }
6745 }
6746
6747 func sysmonUpdateGOMAXPROCS() {
6748
6749 lock(&computeMaxProcsLock)
6750
6751
6752 lock(&sched.lock)
6753 custom := sched.customGOMAXPROCS
6754 curr := gomaxprocs
6755 unlock(&sched.lock)
6756 if custom {
6757 unlock(&computeMaxProcsLock)
6758 return
6759 }
6760
6761
6762 procs := defaultGOMAXPROCS(0)
6763 unlock(&computeMaxProcsLock)
6764 if procs == curr {
6765 // No change; nothing to do.
6766 return
6767 }
6768 // Wake the update goroutine if it is idle. If it isn't, an update
6769 // is already in flight; sysmon will notice any remaining difference
6770 // on a later tick and try again.
6772 if updateMaxProcsG.idle.Load() {
6773 lock(&updateMaxProcsG.lock)
6774 updateMaxProcsG.procs = procs
6775 updateMaxProcsG.idle.Store(false)
6776 var list gList
6777 list.push(updateMaxProcsG.g)
6778 injectglist(&list)
6779 unlock(&updateMaxProcsG.lock)
6780 }
6781 }
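// Editorial sketch: from application code the machinery above is visible
// only through runtime.GOMAXPROCS and GODEBUG. A minimal illustration:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// An argument below 1 only queries the current value.
//		fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
//
//		// Setting an explicit value marks GOMAXPROCS as custom
//		// (sched.customGOMAXPROCS), after which the automatic
//		// updates above stop applying.
//		runtime.GOMAXPROCS(4)
//	}
//
// Automatic updates can also be disabled wholesale by running with
// GODEBUG=updatemaxprocs=0.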
6782
6783 // schedEnableUser enables or disables the scheduling of user
6784 // goroutines.
6785 //
6786 // This does not stop already running user goroutines, so the caller
6787 // should first stop the world when disabling user goroutines.
6788 func schedEnableUser(enable bool) {
6789 lock(&sched.lock)
6790 if sched.disable.user == !enable {
6791 unlock(&sched.lock)
6792 return
6793 }
6794 sched.disable.user = !enable
6795 if enable {
6796 n := sched.disable.runnable.size
6797 globrunqputbatch(&sched.disable.runnable)
6798 unlock(&sched.lock)
6799 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6800 startm(nil, false, false)
6801 }
6802 } else {
6803 unlock(&sched.lock)
6804 }
6805 }
6806
6807 // schedEnabled reports whether gp should be scheduled. It returns
6808 // false if scheduling of gp is disabled.
6809 //
6810 // sched.lock must be held.
6811 func schedEnabled(gp *g) bool {
6812 assertLockHeld(&sched.lock)
6813
6814 if sched.disable.user {
6815 return isSystemGoroutine(gp, true)
6816 }
6817 return true
6818 }
6819
6820 // Put mp on midle list.
6821 // sched.lock must be held.
6822 // May run during STW, so write barriers are not allowed.
6825 func mput(mp *m) {
6826 assertLockHeld(&sched.lock)
6827
6828 mp.schedlink = sched.midle
6829 sched.midle.set(mp)
6830 sched.nmidle++
6831 checkdead()
6832 }
6833
6834 // Try to get an m from midle list.
6835 // sched.lock must be held.
6836 // May run during STW, so write barriers are not allowed.
6839 func mget() *m {
6840 assertLockHeld(&sched.lock)
6841
6842 mp := sched.midle.ptr()
6843 if mp != nil {
6844 sched.midle = mp.schedlink
6845 sched.nmidle--
6846 }
6847 return mp
6848 }
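// Editorial sketch (not part of proc.go): mput/mget above are a plain
// intrusive LIFO threaded through m.schedlink, protected by sched.lock.
// The same shape, stripped of runtime types:
//
//	type node struct{ next *node }
//
//	var head *node // sched.midle analogue; guarded by a lock
//
//	func push(n *node) { n.next = head; head = n } // mput
//	func pop() *node {                             // mget
//		n := head
//		if n != nil {
//			head = n.next
//		}
//		return n
//	}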
6849
6850 // Put gp on the global runnable queue.
6851 // sched.lock must be held.
6852 // May run during STW, so write barriers are not allowed.
6855 func globrunqput(gp *g) {
6856 assertLockHeld(&sched.lock)
6857
6858 sched.runq.pushBack(gp)
6859 }
6860
6861 // Put gp at the head of the global runnable queue.
6862 // sched.lock must be held.
6863 // May run during STW, so write barriers are not allowed.
6866 func globrunqputhead(gp *g) {
6867 assertLockHeld(&sched.lock)
6868
6869 sched.runq.push(gp)
6870 }
6871
6872 // Put a batch of runnable goroutines on the global runnable queue.
6873 // This clears *batch.
6874 // sched.lock must be held.
6875 // May run during STW, so write barriers are not allowed.
6878 func globrunqputbatch(batch *gQueue) {
6879 assertLockHeld(&sched.lock)
6880
6881 sched.runq.pushBackAll(*batch)
6882 *batch = gQueue{}
6883 }
6884
6885 // Try to get a G from the global runnable queue.
6886 // sched.lock must be held.
6887 func globrunqget() *g {
6888 assertLockHeld(&sched.lock)
6889
6890 if sched.runq.size == 0 {
6891 return nil
6892 }
6893
6894 return sched.runq.pop()
6895 }
6896
6897 // Try to get a batch of Gs from the global runnable queue: one is
6898 // returned directly, the rest in q. sched.lock must be held.
6899 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
6900 assertLockHeld(&sched.lock)
6901
6902 if sched.runq.size == 0 {
6903 return
6904 }
6905
6906 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
6907
6908 gp = sched.runq.pop()
6909 n--
6910
6911 for ; n > 0; n-- {
6912 gp1 := sched.runq.pop()
6913 q.pushBack(gp1)
6914 }
6915 return
6916 }
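// Worked example of the cap above (editorial): with a global queue of
// size 100 and gomaxprocs = 4, a request for n = 64 yields
//
//	min(64, 100, 100/4+1) = min(64, 100, 26) = 26
//
// so one P grabs at most roughly its fair share plus one, leaving work
// for the other Ps.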
6917
6918 // pMask is an atomic bitstring with one bit per P.
6919 type pMask []uint32
6920
6921 // read returns true if P id's bit is set.
6922 func (p pMask) read(id uint32) bool {
6923 word := id / 32
6924 mask := uint32(1) << (id % 32)
6925 return (atomic.Load(&p[word]) & mask) != 0
6926 }
6927
6928 // set sets P id's bit.
6929 func (p pMask) set(id int32) {
6930 word := id / 32
6931 mask := uint32(1) << (id % 32)
6932 atomic.Or(&p[word], mask)
6933 }
6934
6935 // clear clears P id's bit.
6936 func (p pMask) clear(id int32) {
6937 word := id / 32
6938 mask := uint32(1) << (id % 32)
6939 atomic.And(&p[word], ^mask)
6940 }
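// Editorial sketch (not part of proc.go): the same bitmask built on the
// public sync/atomic API; atomic.OrUint32/AndUint32 need Go 1.23+.
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	type bitmask []uint32
//
//	func (b bitmask) read(id uint32) bool {
//		return atomic.LoadUint32(&b[id/32])&(1<<(id%32)) != 0
//	}
//	func (b bitmask) set(id uint32)   { atomic.OrUint32(&b[id/32], 1<<(id%32)) }
//	func (b bitmask) clear(id uint32) { atomic.AndUint32(&b[id/32], ^uint32(1<<(id%32))) }
//
//	func main() {
//		m := make(bitmask, 2) // room for 64 ids
//		m.set(40)
//		fmt.Println(m.read(40), m.read(3)) // true false
//		m.clear(40)
//		fmt.Println(m.read(40)) // false
//	}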
6941
6942 // pidleput puts p on the _Pidle list.
6943 //
6944 // This releases ownership of p. Once sched.lock is released it is no
6945 // longer safe to use p.
6946 //
6947 // sched.lock must be held.
6948 //
6949 // now must be a relatively recent call to nanotime() or zero. Returns
6950 // now or the current time if now was zero.
6951 //
6952 // May run during STW, so write barriers are not allowed.
6953 func pidleput(pp *p, now int64) int64 {
6954 assertLockHeld(&sched.lock)
6955
6956 if !runqempty(pp) {
6957 throw("pidleput: P has non-empty run queue")
6958 }
6959 if now == 0 {
6960 now = nanotime()
6961 }
6962 if pp.timers.len.Load() == 0 {
6963 timerpMask.clear(pp.id)
6964 }
6965 idlepMask.set(pp.id)
6966 pp.link = sched.pidle
6967 sched.pidle.set(pp)
6968 sched.npidle.Add(1)
6969 if !pp.limiterEvent.start(limiterEventIdle, now) {
6970 throw("must be able to track idle limiter event")
6971 }
6972 return now
6973 }
6974
6975 // pidleget tries to get a p from the _Pidle list, acquiring ownership.
6976 //
6977 // sched.lock must be held.
6978 //
6979 // May run during STW, so write barriers are not allowed.
6982 func pidleget(now int64) (*p, int64) {
6983 assertLockHeld(&sched.lock)
6984
6985 pp := sched.pidle.ptr()
6986 if pp != nil {
6987 // A timer may be added at any time now, so start watching for them.
6988 if now == 0 {
6989 now = nanotime()
6990 }
6991 timerpMask.set(pp.id)
6992 idlepMask.clear(pp.id)
6993 sched.pidle = pp.link
6994 sched.npidle.Add(-1)
6995 pp.limiterEvent.stop(limiterEventIdle, now)
6996 }
6997 return pp, now
6998 }
6999
7000 // pidlegetSpinning tries to get a p from the _Pidle list, acquiring
7001 // ownership. This is called by spinning Ms (or callers that need a
7002 // spinning M) that have found work. If no P is available, this must
7003 // synchronize with non-spinning Ms that may be preparing to drop
7004 // their P without checking the work queues again, by setting
7005 // sched.needspinning.
7006 //
7007 // sched.lock must be held.
7010 func pidlegetSpinning(now int64) (*p, int64) {
7011 assertLockHeld(&sched.lock)
7012
7013 pp, now := pidleget(now)
7014 if pp == nil {
7015 // See the "Delicate dance" comment in findRunnable. We found work
7016 // that we cannot take. We must synchronize with non-spinning Ms that
7017 // may be about to drop their P.
7018 sched.needspinning.Store(1)
7019 return nil, now
7020 }
7021
7022 return pp, now
7023 }
7024
7025 // runqempty reports whether pp has no Gs on its local run queue.
7026 // It never returns true spuriously.
7027 func runqempty(pp *p) bool {
7028 // Defend against a race where 1) pp has G1 in runnext but runqhead ==
7029 // runqtail, 2) runqput on pp kicks G1 to the runq, 3) runqget on pp
7030 // empties runnext. Simply observing that runqhead == runqtail and then
7031 // observing that runnext == nil does not mean the queue is empty.
7032 for {
7033 head := atomic.Load(&pp.runqhead)
7034 tail := atomic.Load(&pp.runqtail)
7035 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7036 if tail == atomic.Load(&pp.runqtail) {
7037 return head == tail && runnext == 0
7038 }
7039 }
7040 }
7041
7042 // To shake out latent assumptions about scheduling order, we
7043 // introduce some randomness into scheduling decisions when running
7044 // with the race detector. The need for this was made obvious by
7045 // changing the (deterministic) scheduling order in Go 1.5 and
7046 // breaking many poorly-written tests.
7051 const randomizeScheduler = raceenabled
7052
7053 // runqput tries to put g on the local runnable queue.
7054 // If next is false, runqput adds g to the tail of the runnable queue.
7055 // If next is true, runqput puts g in the pp.runnext slot.
7056 // If the run queue is full, runnext puts g on the global queue.
7057 // Executed only by the owner P.
7058 func runqput(pp *p, gp *g, next bool) {
7059 if !haveSysmon && next {
7060 // A goroutine in runnext shares the current goroutine's time
7061 // slice, and sysmon-driven preemption is what keeps a pair of
7062 // goroutines that repeatedly ready each other from starving the
7063 // rest of the queue. Without sysmon, avoid runnext and use the
7064 // tail of the queue instead.
7068 next = false
7069 }
7070 if randomizeScheduler && next && randn(2) == 0 {
7071 next = false
7072 }
7073
7074 if next {
7075 retryNext:
7076 oldnext := pp.runnext
7077 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7078 goto retryNext
7079 }
7080 if oldnext == 0 {
7081 return
7082 }
7083 // Kick the old runnext out to the regular run queue.
7084 gp = oldnext.ptr()
7085 }
7086
7087 retry:
7088 h := atomic.LoadAcq(&pp.runqhead)
7089 t := pp.runqtail
7090 if t-h < uint32(len(pp.runq)) {
7091 pp.runq[t%uint32(len(pp.runq))].set(gp)
7092 atomic.StoreRel(&pp.runqtail, t+1)
7093 return
7094 }
7095 if runqputslow(pp, gp, h, t) {
7096 return
7097 }
7098 // The queue is not full, now the put above must succeed.
7099 goto retry
7100 }
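// Worked example (editorial) of the ring arithmetic above: runqhead and
// runqtail are free-running uint32 counters, so with len(pp.runq) = 256,
// h = 300, t = 430:
//
//	length = t - h           // 130 queued Gs
//	slot i = (h + i) % 256   // wraps transparently, even on uint32 overflow
//
// Fullness is simply t-h == len(pp.runq), with no empty-versus-full
// ambiguity.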
7101
7102 // Put g and a batch of work from local runnable queue on global queue.
7103 // Executed only by the owner P.
7104 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7105 var batch [len(pp.runq)/2 + 1]*g
7106 // First, grab a batch from the local queue.
7108 n := t - h
7109 n = n / 2
7110 if n != uint32(len(pp.runq)/2) {
7111 throw("runqputslow: queue is not full")
7112 }
7113 for i := uint32(0); i < n; i++ {
7114 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7115 }
7116 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7117 return false
7118 }
7119 batch[n] = gp
7120 // Randomize the batch order with an inside-out Fisher-Yates shuffle.
7121 if randomizeScheduler {
7122 for i := uint32(1); i <= n; i++ {
7123 j := cheaprandn(i + 1)
7124 batch[i], batch[j] = batch[j], batch[i]
7125 }
7126 }
7127 // Link the goroutines.
7129 for i := uint32(0); i < n; i++ {
7130 batch[i].schedlink.set(batch[i+1])
7131 }
7132
7133 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7134 // Now put the batch on the global queue.
7136 lock(&sched.lock)
7137 globrunqputbatch(&q)
7138 unlock(&sched.lock)
7139 return true
7140 }
7141
7142 // runqputbatch tries to put all the Gs on q on the local runnable
7143 // queue. Gs that don't fit remain in q.
7144 // Executed only by the owner P.
7145 func runqputbatch(pp *p, q *gQueue) {
7146 if q.empty() {
7147 return
7148 }
7149 h := atomic.LoadAcq(&pp.runqhead)
7150 t := pp.runqtail
7151 n := uint32(0)
7152 for !q.empty() && t-h < uint32(len(pp.runq)) {
7153 gp := q.pop()
7154 pp.runq[t%uint32(len(pp.runq))].set(gp)
7155 t++
7156 n++
7157 }
7158
7159 if randomizeScheduler {
7160 off := func(o uint32) uint32 {
7161 return (pp.runqtail + o) % uint32(len(pp.runq))
7162 }
7163 for i := uint32(1); i < n; i++ {
7164 j := cheaprandn(i + 1)
7165 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7166 }
7167 }
7168
7169 atomic.StoreRel(&pp.runqtail, t)
7170
7171 return
7172 }
7173
7174 // Get g from local runnable queue.
7175 // If inheritTime is true, gp should inherit the remaining time in the
7176 // current time slice. Otherwise, it should start a new time slice.
7177 // Executed only by the owner P.
7178 func runqget(pp *p) (gp *g, inheritTime bool) {
7179 // If there's a runnext, it's the next G to run.
7180 next := pp.runnext
7181 // If the runnext is non-0 and the CAS fails, it could only have been
7182 // stolen by another P; only the owner P can set runnext to non-0.
7183 // Hence there's no need to retry the CAS if it fails.
7184 if next != 0 && pp.runnext.cas(next, 0) {
7185 return next.ptr(), true
7186 }
7187
7188 for {
7189 h := atomic.LoadAcq(&pp.runqhead)
7190 t := pp.runqtail
7191 if t == h {
7192 return nil, false
7193 }
7194 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7195 if atomic.CasRel(&pp.runqhead, h, h+1) {
7196 return gp, false
7197 }
7198 }
7199 }
7200
7201 // runqdrain drains the local runnable queue of pp and returns all
7202 // goroutines in it. Executed only by the owner P.
7203 func runqdrain(pp *p) (drainQ gQueue) {
7204 oldNext := pp.runnext
7205 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7206 drainQ.pushBack(oldNext.ptr())
7207 }
7208
7209 retry:
7210 h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
7211 t := pp.runqtail
7212 qn := t - h
7213 if qn == 0 {
7214 return
7215 }
7216 if qn > uint32(len(pp.runq)) { // read inconsistent h and t
7217 goto retry
7218 }
7219
7220 if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
7221 goto retry
7222 }
7223 // Advancing the head pointer first takes full ownership of the Gs,
7224 // so gp.schedlink can be updated while linking them into drainQ
7225 // without racing against a concurrent runqsteal, which can no
7226 // longer see these slots.
7231 for i := uint32(0); i < qn; i++ {
7232 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7233 drainQ.pushBack(gp)
7234 }
7235 return
7236 }
7237
7238 // Grabs a batch of goroutines from pp's runnable queue into batch.
7239 // batch is a ring buffer starting at batchHead.
7240 // Returns number of grabbed goroutines.
7241 // Can be executed by any P.
7242 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7243 for {
7244 h := atomic.LoadAcq(&pp.runqhead)
7245 t := atomic.LoadAcq(&pp.runqtail)
7246 n := t - h
7247 n = n - n/2
7248 if n == 0 {
7249 if stealRunNextG {
7250 // Try to steal from pp.runnext.
7251 if next := pp.runnext; next != 0 {
7252 if pp.status == _Prunning {
7253 // Sleep to ensure that pp isn't about to run the g
7254 // we are about to steal.
7255 // The important use case here is when the g running
7256 // on pp ready()s another g and then almost
7257 // immediately blocks. Instead of stealing runnext
7258 // in this window, back off to give pp a chance to
7259 // schedule runnext. This will avoid thrashing gs
7260 // between different Ps.
7261 // A sync chan send/recv takes ~50ns as of time of
7262 // writing, so 3us gives ~50x overshoot.
7263 if !osHasLowResTimer {
7264 usleep(3)
7265 } else {
7266 // On some platforms system timer granularity is
7267 // 1-15ms, which is way too much for this
7268 // optimization. So just yield.
7269 osyield()
7270 }
7271 }
7272 if !pp.runnext.cas(next, 0) {
7273 continue
7274 }
7275 batch[batchHead%uint32(len(batch))] = next
7276 return 1
7277 }
7278 }
7279 return 0
7280 }
7281 if n > uint32(len(pp.runq)/2) {
7282 continue
7283 }
7284 for i := uint32(0); i < n; i++ {
7285 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7286 batch[(batchHead+i)%uint32(len(batch))] = g
7287 }
7288 if atomic.CasRel(&pp.runqhead, h, h+n) {
7289 return n
7290 }
7291 }
7292 }
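// Worked example (editorial): n = n - n/2 rounds the steal up, taking
// ceil(n/2) of what is present. With t-h = 5 queued Gs: n = 5 - 5/2 = 3
// stolen, 2 left with the victim; with t-h = 1: n = 1, so even a lone G
// can be stolen.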
7293
7294 // Steal half of elements from local runnable queue of p2
7295 // and put onto local runnable queue of p.
7296 // Returns one of the stolen elements (or nil if failed).
7297 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7298 t := pp.runqtail
7299 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7300 if n == 0 {
7301 return nil
7302 }
7303 n--
7304 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7305 if n == 0 {
7306 return gp
7307 }
7308 h := atomic.LoadAcq(&pp.runqhead)
7309 if t-h+n >= uint32(len(pp.runq)) {
7310 throw("runqsteal: runq overflow")
7311 }
7312 atomic.StoreRel(&pp.runqtail, t+n)
7313 return gp
7314 }
7315
7316 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can
7317 // only be on one gQueue or gList at a time.
7318 type gQueue struct {
7319 head guintptr
7320 tail guintptr
7321 size int32
7322 }
7323
7324 // empty reports whether q is empty.
7325 func (q *gQueue) empty() bool {
7326 return q.head == 0
7327 }
7328
7329 // push adds gp to the head of q.
7330 func (q *gQueue) push(gp *g) {
7331 gp.schedlink = q.head
7332 q.head.set(gp)
7333 if q.tail == 0 {
7334 q.tail.set(gp)
7335 }
7336 q.size++
7337 }
7338
7339 // pushBack adds gp to the tail of q.
7340 func (q *gQueue) pushBack(gp *g) {
7341 gp.schedlink = 0
7342 if q.tail != 0 {
7343 q.tail.ptr().schedlink.set(gp)
7344 } else {
7345 q.head.set(gp)
7346 }
7347 q.tail.set(gp)
7348 q.size++
7349 }
7350
7351 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
7352 // not be used.
7353 func (q *gQueue) pushBackAll(q2 gQueue) {
7354 if q2.tail == 0 {
7355 return
7356 }
7357 q2.tail.ptr().schedlink = 0
7358 if q.tail != 0 {
7359 q.tail.ptr().schedlink = q2.head
7360 } else {
7361 q.head = q2.head
7362 }
7363 q.tail = q2.tail
7364 q.size += q2.size
7365 }
7366
7367 // pop removes and returns the head of queue q. It returns nil if q
7368 // is empty.
7369 func (q *gQueue) pop() *g {
7370 gp := q.head.ptr()
7371 if gp != nil {
7372 q.head = gp.schedlink
7373 if q.head == 0 {
7374 q.tail = 0
7375 }
7376 q.size--
7377 }
7378 return gp
7379 }
7380
7381 // popList takes all Gs in q and returns them as a gList.
7382 func (q *gQueue) popList() gList {
7383 stack := gList{q.head, q.size}
7384 *q = gQueue{}
7385 return stack
7386 }
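// Editorial sketch (not part of proc.go): gQueue stripped to its essence,
// an intrusive FIFO with O(1) pushBack/pop and no allocation beyond the
// elements themselves.
//
//	package main
//
//	import "fmt"
//
//	type node struct { // plays the role of g
//		val  int
//		next *node // plays the role of g.schedlink
//	}
//
//	type queue struct { // plays the role of gQueue
//		head, tail *node
//		size       int
//	}
//
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//		q.size++
//	}
//
//	func (q *queue) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//			q.size--
//		}
//		return n
//	}
//
//	func main() {
//		var q queue
//		for i := 1; i <= 3; i++ {
//			q.pushBack(&node{val: i})
//		}
//		for n := q.pop(); n != nil; n = q.pop() {
//			fmt.Println(n.val) // 1, 2, 3
//		}
//	}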
7387
7388 // A gList is a list of Gs linked through g.schedlink. A G can only
7389 // be on one gQueue or gList at a time.
7390 type gList struct {
7391 head guintptr
7392 size int32
7393 }
7394
7395 // empty reports whether l is empty.
7396 func (l *gList) empty() bool {
7397 return l.head == 0
7398 }
7399
7400 // push adds gp to the head of l.
7401 func (l *gList) push(gp *g) {
7402 gp.schedlink = l.head
7403 l.head.set(gp)
7404 l.size++
7405 }
7406
7407 // pushAll prepends all Gs in q to l.
7408 func (l *gList) pushAll(q gQueue) {
7409 if !q.empty() {
7410 q.tail.ptr().schedlink = l.head
7411 l.head = q.head
7412 l.size += q.size
7413 }
7414 }
7415
7416 // pop removes and returns the head of l. If l is empty, it returns nil.
7417 func (l *gList) pop() *g {
7418 gp := l.head.ptr()
7419 if gp != nil {
7420 l.head = gp.schedlink
7421 l.size--
7422 }
7423 return gp
7424 }
7425
7426 // setMaxThreads is the runtime implementation of runtime/debug.SetMaxThreads.
7427 func setMaxThreads(in int) (out int) {
7428 lock(&sched.lock)
7429 out = int(sched.maxmcount)
7430 if in > 0x7fffffff {
7431 sched.maxmcount = 0x7fffffff
7432 } else {
7433 sched.maxmcount = int32(in)
7434 }
7435 checkmcount()
7436 unlock(&sched.lock)
7437 return
7438 }
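// Usage note (editorial): setMaxThreads backs the public
// runtime/debug.SetMaxThreads API, e.g.
//
//	package main
//
//	import "runtime/debug"
//
//	func main() {
//		prev := debug.SetMaxThreads(20000) // raise the limit; returns the old one
//		_ = prev
//	}
//
// The program crashes if it exceeds the limit, so this is a last-resort
// guard rather than a tuning knob.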
7439
7440 // procPin pins the current goroutine's M to its P, disabling
7441 // preemption, and returns the id of that P. It must be paired with a
7442 // call to procUnpin.
7443 //
7444 // procPin should be an internal detail, but widely used packages
7445 // access it using linkname. Do not remove or change the type
7446 // signature. See go.dev/issue/67401.
7452 func procPin() int {
7453 gp := getg()
7454 mp := gp.m
7455
7456 mp.locks++
7457 return int(mp.p.ptr().id)
7458 }
7459
7460 // procUnpin undoes a prior procPin, re-enabling preemption of the
7461 // current goroutine's M.
7462 //
7463 // Like procPin, it is accessed via linkname by outside packages. Do
7464 // not remove or change the type signature. See go.dev/issue/67401.
7472 func procUnpin() {
7473 gp := getg()
7474 gp.m.locks--
7475 }
7476
7477 // sync_runtime_procPin is the runtime entry point behind
7478 // sync.runtime_procPin, wired up via linkname.
7479 func sync_runtime_procPin() int {
7480 return procPin()
7481 }
7482
7483 // sync_runtime_procUnpin is the runtime entry point behind
7484 // sync.runtime_procUnpin, wired up via linkname.
7485 func sync_runtime_procUnpin() {
7486 procUnpin()
7487 }
7488
7489 // sync_atomic_runtime_procPin is the runtime entry point behind
7490 // sync/atomic.runtime_procPin, wired up via linkname.
7491 func sync_atomic_runtime_procPin() int {
7492 return procPin()
7493 }
7494
7495 // sync_atomic_runtime_procUnpin is the runtime entry point behind
7496 // sync/atomic.runtime_procUnpin, wired up via linkname.
7497 func sync_atomic_runtime_procUnpin() {
7498 procUnpin()
7499 }
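// Editorial sketch of the required pairing for the pin helpers above
// (runtime-internal callers; the pinned section must not block):
//
//	pid := procPin()
//	// ... short, non-blocking work indexed by pid, e.g. a per-P slot ...
//	procUnpin()
//
// This is the discipline that per-P caches such as sync.Pool rely on.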
7500
7501 // Active spinning for sync.Mutex.
7502 //
7503 // internal_sync_runtime_canSpin reports whether spinning makes sense
7504 // at the moment.
7505 func internal_sync_runtime_canSpin(i int) bool {
7506 // sync.Mutex is cooperative, so we are conservative with spinning.
7507 // Spin only a few times and only if running on a multicore machine,
7508 // GOMAXPROCS > 1, there is at least one other running P, and the
7509 // local runq is empty. Unlike runtime mutex we don't do passive
7510 // spinning, since there can be work on the global runq or other Ps.
7511 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7512 return false
7513 }
7514 if p := getg().m.p.ptr(); !runqempty(p) {
7515 return false
7516 }
7517 return true
7518 }
7519
7520 // internal_sync_runtime_doSpin performs a short busy-wait
7521 // (active_spin_cnt iterations of procyield).
7522 func internal_sync_runtime_doSpin() {
7523 procyield(active_spin_cnt)
7524 }
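// Editorial sketch (hypothetical names): the contract between canSpin
// and doSpin, written as a lock acquisition loop. tryAcquire and
// parkSlow stand in for the real mutex internals.
//
//	for i := 0; ; i++ {
//		if tryAcquire() {
//			return
//		}
//		if internal_sync_runtime_canSpin(i) {
//			// Stay runnable and burn a few cycles; cheap if the
//			// holder is about to release on another core.
//			internal_sync_runtime_doSpin()
//			continue
//		}
//		// Spinning no longer makes sense; block until woken.
//		parkSlow()
//	}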
7525
7526 // sync_runtime_canSpin is kept as a separate entry point because
7527 // widely used packages access it using linkname. Do not remove or
7528 // change the type signature. See go.dev/issue/67401.
7540 func sync_runtime_canSpin(i int) bool {
7541 return internal_sync_runtime_canSpin(i)
7542 }
7543
7544 // sync_runtime_doSpin is likewise kept for linkname users. Do not
7545 // remove or change the type signature. See go.dev/issue/67401.
7556 func sync_runtime_doSpin() {
7557 internal_sync_runtime_doSpin()
7558 }
7559
7560 var stealOrder randomOrder
7561
7562 // randomOrder/randomEnum are helper types for randomized work
7563 // stealing; they enumerate all Ps in different pseudo-random orders
7564 // without repetitions: if X and GOMAXPROCS are coprime, the sequence
7565 // (i + X) % GOMAXPROCS visits every P exactly once.
7566 type randomOrder struct {
7567 count uint32
7568 coprimes []uint32
7569 }
7570
7571 type randomEnum struct {
7572 i uint32
7573 count uint32
7574 pos uint32
7575 inc uint32
7576 }
7577
7578 func (ord *randomOrder) reset(count uint32) {
7579 ord.count = count
7580 ord.coprimes = ord.coprimes[:0]
7581 for i := uint32(1); i <= count; i++ {
7582 if gcd(i, count) == 1 {
7583 ord.coprimes = append(ord.coprimes, i)
7584 }
7585 }
7586 }
7587
7588 func (ord *randomOrder) start(i uint32) randomEnum {
7589 return randomEnum{
7590 count: ord.count,
7591 pos: i % ord.count,
7592 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7593 }
7594 }
7595
7596 func (enum *randomEnum) done() bool {
7597 return enum.i == enum.count
7598 }
7599
7600 func (enum *randomEnum) next() {
7601 enum.i++
7602 enum.pos = (enum.pos + enum.inc) % enum.count
7603 }
7604
7605 func (enum *randomEnum) position() uint32 {
7606 return enum.pos
7607 }
7608
7609 func gcd(a, b uint32) uint32 {
7610 for b != 0 {
7611 a, b = b, a%b
7612 }
7613 return a
7614 }
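// Editorial demo (standalone, not part of proc.go): a coprime stride
// visits every element exactly once, which is what randomOrder relies on.
//
//	package main
//
//	import "fmt"
//
//	func gcd(a, b uint32) uint32 {
//		for b != 0 {
//			a, b = b, a%b
//		}
//		return a
//	}
//
//	func main() {
//		const count = 6
//		for inc := uint32(1); inc <= count; inc++ {
//			if gcd(inc, count) != 1 {
//				continue // stride shares a factor with count: would repeat
//			}
//			pos := uint32(2) // arbitrary starting P
//			for i := 0; i < count; i++ {
//				fmt.Print(pos, " ")
//				pos = (pos + inc) % count
//			}
//			fmt.Println("<- inc", inc)
//		}
//	}
//
// For count = 6 the coprime strides are 1 and 5, each printing a
// permutation of 0..5 with no repeats.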
7615
7616 // An initTask represents the set of initializations that need to be
7617 // done for a package.
7618 type initTask struct {
7619 state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
7620 nfns uint32
7621 // followed by nfns pcs, uintptr sized, one per init function to run
7622 }
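// Memory layout decoded by doInit1 below (editorial summary): the two
// uint32 fields occupy the first 8 bytes and the function addresses
// follow immediately:
//
//	offset 0             : state uint32
//	offset 4             : nfns  uint32
//	offset 8 + i*PtrSize : PC of init function i, for i in [0, nfns)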
7623
7624 // inittrace stores statistics for init functions which are updated
7625 // by malloc and newproc when active is true.
7626 var inittrace tracestat
7627
7628 type tracestat struct {
7629 active bool
7630 id uint64
7631 allocs uint64
7632 bytes uint64
7633 }
7634
7635 func doInit(ts []*initTask) {
7636 for _, t := range ts {
7637 doInit1(t)
7638 }
7639 }
7640
7641 func doInit1(t *initTask) {
7642 switch t.state {
7643 case 2:
7644 return
7645 case 1:
7646 throw("recursive call during initialization - linker skew")
7647 default:
7648 t.state = 1
7649
7650 var (
7651 start int64
7652 before tracestat
7653 )
7654
7655 if inittrace.active {
7656 start = nanotime()
7657 // Load stats non-atomically; inittrace is updated only by this init goroutine.
7658 before = inittrace
7659 }
7660
7661 if t.nfns == 0 {
7662 // We should have pruned all of these in the linker.
7663 throw("inittask with no functions")
7664 }
7665
7666 firstFunc := add(unsafe.Pointer(t), 8)
7667 for i := uint32(0); i < t.nfns; i++ {
7668 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7669 f := *(*func())(unsafe.Pointer(&p))
7670 f()
7671 }
7672
7673 if inittrace.active {
7674 end := nanotime()
7675 // Load stats non-atomically; inittrace is updated only by this init goroutine.
7676 after := inittrace
7677 // Use the PC of the first init function to recover the package path for the report.
7678 f := *(*func())(unsafe.Pointer(&firstFunc))
7679 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7680
7681 var sbuf [24]byte
7682 print("init ", pkg, " @")
7683 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7684 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7685 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7686 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7687 print("\n")
7688 }
7689
7690 t.state = 2
7691 }
7692 }
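// For reference (editorial note): the trace printed above is enabled by
// running with GODEBUG=inittrace=1; each package init produces one line
// of the form
//
//	init <pkg> @<ms since process start> ms, <ms> ms clock, <n> bytes, <n> allocs
//
// matching the print calls in doInit1.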
7693