Source file
src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/strconv"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
21 var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
127 var runtime_inittasks []*initTask
128
129
130
131
132
133 var main_init_done chan bool
134
135
136 func main_main()
137
138
139 var mainStarted bool
140
141
142 var runtimeInitTime int64
143
144
145 var initSigmask sigset
146
147
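// The main goroutine. It finishes runtime initialization (package init tasks,
// GC enablement, cgo checks), calls the user program's main.main, and then
// exits the process.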
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
154
155
156
157
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
164
165
166
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
178
179
180
181
182
183
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247

// Run the initializing tasks. Depending on build mode this
// list can arrive a few different ways, but it will always
// contain the init tasks computed by the linker for all the
// packages in the program (excluding those added at runtime
// by package plugin). Run through the modules in dependency
// order (the order they are initialized by the dynamic loader,
// i.e. they are added to the moduledata linked list).
255 last := lastmoduledatap
256 for m := &firstmoduledata; true; m = m.next {
257 doInit(m.inittasks)
258 if m == last {
259 break
260 }
261 }
262
263
264
265 inittrace.active = false
266
267 close(main_init_done)
268
269 needUnlock = false
270 unlockOSThread()
271
272 if isarchive || islibrary {
273
274
275 if GOARCH == "wasm" {
276
277
278
279
280
281
282
283 pause(sys.GetCallerSP() - 16)
284 panic("unreachable")
285 }
286 return
287 }
288 fn := main_main
289 fn()
290
291
292
293
294
295
296
297
298 exitHooksRun := false
299 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
300 runExitHooks(0)
301 exitHooksRun = true
302 lsandoleakcheck()
303 }
304
305
306
307
308
309 if runningPanicDefers.Load() != 0 {
310
311 for c := 0; c < 1000; c++ {
312 if runningPanicDefers.Load() == 0 {
313 break
314 }
315 Gosched()
316 }
317 }
318 if panicking.Load() != 0 {
319 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
320 }
321 if !exitHooksRun {
322 runExitHooks(0)
323 }
324 if raceenabled {
325 racefini()
326 }
327
328 exit(0)
329 for {
330 var x *int32
331 *x = 0
332 }
333 }
334
335
336
337
338 func os_beforeExit(exitCode int) {
339 runExitHooks(exitCode)
340 if exitCode == 0 && raceenabled {
341 racefini()
342 }
343
344
345 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
346 lsandoleakcheck()
347 }
348 }
349
350 func init() {
351 exithook.Gosched = Gosched
352 exithook.Goid = func() uint64 { return getg().goid }
353 exithook.Throw = throw
354 }
355
356 func runExitHooks(code int) {
357 exithook.Run(code)
358 }
359
360
361 func init() {
362 go forcegchelper()
363 }
364
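// forcegchelper is the goroutine that forces GCs on behalf of sysmon. It
// parks itself on forcegc.lock and, each time sysmon wakes it, starts a GC
// with a gcTriggerTime trigger.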
365 func forcegchelper() {
366 forcegc.g = getg()
367 lockInit(&forcegc.lock, lockRankForcegc)
368 for {
369 lock(&forcegc.lock)
370 if forcegc.idle.Load() {
371 throw("forcegc: phase error")
372 }
373 forcegc.idle.Store(true)
374 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
375
376 if debug.gctrace > 0 {
377 println("GC forced")
378 }
379
380 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
381 }
382 }
383
384
385
386
387
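// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.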
388 func Gosched() {
389 checkTimeouts()
390 mcall(gosched_m)
391 }
392
393
394
395
396
397 func goschedguarded() {
398 mcall(goschedguarded_m)
399 }
400
401
402
403
404
405
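// goschedIfBusy yields the processor like Gosched, but skips the yield when
// the goroutine has not been asked to preempt and there are idle Ps, i.e.
// when the scheduler is not actually busy.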
406 func goschedIfBusy() {
407 gp := getg()
408
409
410 if !gp.preempt && sched.npidle.Load() > 0 {
411 return
412 }
413 mcall(gosched_m)
414 }
415
// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack. If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between the call
// to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive; do not
// re-use reasons.
444 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
445 if reason != waitReasonSleep {
446 checkTimeouts()
447 }
448 mp := acquirem()
449 gp := mp.curg
450 status := readgstatus(gp)
451 if status != _Grunning && status != _Gscanrunning {
452 throw("gopark: bad g status")
453 }
454 mp.waitlock = lock
455 mp.waitunlockf = unlockf
456 gp.waitreason = reason
457 mp.waitTraceBlockReason = traceReason
458 mp.waitTraceSkip = traceskip
459 releasem(mp)
460
461 mcall(park_m)
462 }
463
464
465
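// goparkunlock puts the current goroutine into a waiting state and unlocks
// the lock. The goroutine can be made runnable again by calling goready(gp).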
466 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
467 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
468 }
469
470
471
472
473
474
475
476
477
478
479
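// goready marks gp ready to run and puts it on a run queue. It switches to
// the system stack to do so. A typical pairing with gopark, sketched here
// with hypothetical unlockf, l, and waiterG values, looks like:
//
//	// waiting side:
//	gopark(unlockf, unsafe.Pointer(l), waitReasonZero, traceBlockGeneric, 1)
//	// waking side, once the waiting g has been recorded somewhere:
//	goready(waiterG, 1)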
480 func goready(gp *g, traceskip int) {
481 systemstack(func() {
482 ready(gp, traceskip, true)
483 })
484 }
485
486
487 func acquireSudog() *sudog {
// Delicate dance: the semaphore implementation calls
// acquireSudog, acquireSudog calls new(sudog),
// new calls malloc, malloc can call the garbage collector,
// and the garbage collector calls the semaphore implementation
// in stopTheWorld.
// Break the cycle by doing acquirem/releasem around new(sudog).
// The acquirem/releasem increments m.locks during new(sudog),
// which keeps the garbage collector from being invoked.
496 mp := acquirem()
497 pp := mp.p.ptr()
498 if len(pp.sudogcache) == 0 {
499 lock(&sched.sudoglock)
500
501 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
502 s := sched.sudogcache
503 sched.sudogcache = s.next
504 s.next = nil
505 pp.sudogcache = append(pp.sudogcache, s)
506 }
507 unlock(&sched.sudoglock)
508
509 if len(pp.sudogcache) == 0 {
510 pp.sudogcache = append(pp.sudogcache, new(sudog))
511 }
512 }
513 n := len(pp.sudogcache)
514 s := pp.sudogcache[n-1]
515 pp.sudogcache[n-1] = nil
516 pp.sudogcache = pp.sudogcache[:n-1]
517 if s.elem.get() != nil {
518 throw("acquireSudog: found s.elem != nil in cache")
519 }
520 releasem(mp)
521 return s
522 }
523
524
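// releaseSudog returns s to the per-P sudog cache after verifying it is no
// longer in use. If the local cache is full, half of it is transferred to the
// central cache under sched.sudoglock.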
525 func releaseSudog(s *sudog) {
526 if s.elem.get() != nil {
527 throw("runtime: sudog with non-nil elem")
528 }
529 if s.isSelect {
530 throw("runtime: sudog with non-false isSelect")
531 }
532 if s.next != nil {
533 throw("runtime: sudog with non-nil next")
534 }
535 if s.prev != nil {
536 throw("runtime: sudog with non-nil prev")
537 }
538 if s.waitlink != nil {
539 throw("runtime: sudog with non-nil waitlink")
540 }
541 if s.c.get() != nil {
542 throw("runtime: sudog with non-nil c")
543 }
544 gp := getg()
545 if gp.param != nil {
546 throw("runtime: releaseSudog with non-nil gp.param")
547 }
548 mp := acquirem()
549 pp := mp.p.ptr()
550 if len(pp.sudogcache) == cap(pp.sudogcache) {
551
552 var first, last *sudog
553 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
554 n := len(pp.sudogcache)
555 p := pp.sudogcache[n-1]
556 pp.sudogcache[n-1] = nil
557 pp.sudogcache = pp.sudogcache[:n-1]
558 if first == nil {
559 first = p
560 } else {
561 last.next = p
562 }
563 last = p
564 }
565 lock(&sched.sudoglock)
566 last.next = sched.sudogcache
567 sched.sudogcache = first
568 unlock(&sched.sudoglock)
569 }
570 pp.sudogcache = append(pp.sudogcache, s)
571 releasem(mp)
572 }
573
574
575 func badmcall(fn func(*g)) {
576 throw("runtime: mcall called on m->g0 stack")
577 }
578
579 func badmcall2(fn func(*g)) {
580 throw("runtime: mcall function returned")
581 }
582
583 func badreflectcall() {
584 panic(plainError("arg size to reflect.call more than 1GB"))
585 }
586
587
588
589 func badmorestackg0() {
590 if !crashStackImplemented {
591 writeErrStr("fatal: morestack on g0\n")
592 return
593 }
594
595 g := getg()
596 switchToCrashStack(func() {
597 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
598 g.m.traceback = 2
599 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
600 print("\n")
601
602 throw("morestack on g0")
603 })
604 }
605
606
607
608 func badmorestackgsignal() {
609 writeErrStr("fatal: morestack on gsignal\n")
610 }
611
612
613 func badctxt() {
614 throw("ctxt != 0")
615 }
616
617
618
619 var gcrash g
620
621 var crashingG atomic.Pointer[g]
622
623
624
625
626
627
628
629
630
631 func switchToCrashStack(fn func()) {
632 me := getg()
633 if crashingG.CompareAndSwapNoWB(nil, me) {
634 switchToCrashStack0(fn)
635 abort()
636 }
637 if crashingG.Load() == me {
638
639 writeErrStr("fatal: recursive switchToCrashStack\n")
640 abort()
641 }
642
643 usleep_no_g(100)
644 writeErrStr("fatal: concurrent switchToCrashStack\n")
645 abort()
646 }
647
648
649
650
651 const crashStackImplemented = GOOS != "windows"
652
653
654 func switchToCrashStack0(fn func())
655
656 func lockedOSThread() bool {
657 gp := getg()
658 return gp.lockedm != 0 && gp.m.lockedg != 0
659 }
660
661 var (
662
663
664
665
666
667
668 allglock mutex
669 allgs []*g
670
671
672
673
674
675
676
677
678
679
680
681
682
683 allglen uintptr
684 allgptr **g
685 )
686
687 func allgadd(gp *g) {
688 if readgstatus(gp) == _Gidle {
689 throw("allgadd: bad status Gidle")
690 }
691
692 lock(&allglock)
693 allgs = append(allgs, gp)
694 if &allgs[0] != allgptr {
695 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
696 }
697 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
698 unlock(&allglock)
699 }
700
701
702
703
704 func allGsSnapshot() []*g {
705 assertWorldStoppedOrLockHeld(&allglock)
706
707
708
709
710
711
712 return allgs[:len(allgs):len(allgs)]
713 }
714
715
716 func atomicAllG() (**g, uintptr) {
717 length := atomic.Loaduintptr(&allglen)
718 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
719 return ptr, length
720 }
721
722
723 func atomicAllGIndex(ptr **g, i uintptr) *g {
724 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
725 }
726
727
728
729
730 func forEachG(fn func(gp *g)) {
731 lock(&allglock)
732 for _, gp := range allgs {
733 fn(gp)
734 }
735 unlock(&allglock)
736 }
737
738
739
740
741
742 func forEachGRace(fn func(gp *g)) {
743 ptr, length := atomicAllG()
744 for i := uintptr(0); i < length; i++ {
745 gp := atomicAllGIndex(ptr, i)
746 fn(gp)
747 }
748 return
749 }
750
751 const (
752
753
754 _GoidCacheBatch = 16
755 )
756
757
758
759 func cpuinit(env string) {
760 cpu.Initialize(env)
761
762
763
764 switch GOARCH {
765 case "386", "amd64":
766 x86HasPOPCNT = cpu.X86.HasPOPCNT
767 x86HasSSE41 = cpu.X86.HasSSE41
768 x86HasFMA = cpu.X86.HasFMA
769
770 case "arm":
771 armHasVFPv4 = cpu.ARM.HasVFPv4
772
773 case "arm64":
774 arm64HasATOMICS = cpu.ARM64.HasATOMICS
775
776 case "loong64":
777 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
778 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
779 loong64HasLSX = cpu.Loong64.HasLSX
780
781 case "riscv64":
782 riscv64HasZbb = cpu.RISCV64.HasZbb
783 }
784 }
785
786
787
788
789
790
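// getGodebugEarly extracts the GODEBUG value directly from the environment
// block on Unix-like systems so it can be consulted before goenvs has run.
// The second result reports whether the value could be determined this early;
// otherwise schedinit falls back to gogetenv("GODEBUG") later.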
791 func getGodebugEarly() (string, bool) {
792 const prefix = "GODEBUG="
793 var env string
794 switch GOOS {
795 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
796
797
798
799 n := int32(0)
800 for argv_index(argv, argc+1+n) != nil {
801 n++
802 }
803
804 for i := int32(0); i < n; i++ {
805 p := argv_index(argv, argc+1+i)
806 s := unsafe.String(p, findnull(p))
807
808 if stringslite.HasPrefix(s, prefix) {
809 env = gostringnocopy(p)[len(prefix):]
810 break
811 }
812 }
813 break
814
815 default:
816 return "", false
817 }
818 return env, true
819 }
820
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
829 func schedinit() {
830 lockInit(&sched.lock, lockRankSched)
831 lockInit(&sched.sysmonlock, lockRankSysmon)
832 lockInit(&sched.deferlock, lockRankDefer)
833 lockInit(&sched.sudoglock, lockRankSudog)
834 lockInit(&deadlock, lockRankDeadlock)
835 lockInit(&paniclk, lockRankPanic)
836 lockInit(&allglock, lockRankAllg)
837 lockInit(&allpLock, lockRankAllp)
838 lockInit(&reflectOffs.lock, lockRankReflectOffs)
839 lockInit(&finlock, lockRankFin)
840 lockInit(&cpuprof.lock, lockRankCpuprof)
841 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
842 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
843 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
844 traceLockInit()
845
846
847
848 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
849
850 lockVerifyMSize()
851
852
853
854 gp := getg()
855 if raceenabled {
856 gp.racectx, raceprocctx0 = raceinit()
857 }
858
859 sched.maxmcount = 10000
860 crashFD.Store(^uintptr(0))
861
862
863 worldStopped()
864
865 godebug, parsedGodebug := getGodebugEarly()
866 if parsedGodebug {
867 parseRuntimeDebugVars(godebug)
868 }
869 ticks.init()
870 moduledataverify()
871 stackinit()
872 randinit()
873 mallocinit()
874 cpuinit(godebug)
875 alginit()
876 mcommoninit(gp.m, -1)
877 modulesinit()
878 typelinksinit()
879 itabsinit()
880 stkobjinit()
881
882 sigsave(&gp.m.sigmask)
883 initSigmask = gp.m.sigmask
884
885 goargs()
886 goenvs()
887 secure()
888 checkfds()
889 if !parsedGodebug {
890
891
892 parseRuntimeDebugVars(gogetenv("GODEBUG"))
893 }
894 finishDebugVarsSetup()
895 gcinit()
896
897
898
899 gcrash.stack = stackalloc(16384)
900 gcrash.stackguard0 = gcrash.stack.lo + 1000
901 gcrash.stackguard1 = gcrash.stack.lo + 1000
902
903
904
905
906
907 if disableMemoryProfiling {
908 MemProfileRate = 0
909 }
910
911
912 mProfStackInit(gp.m)
913 defaultGOMAXPROCSInit()
914
915 lock(&sched.lock)
916 sched.lastpoll.Store(nanotime())
917 var procs int32
918 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
919 procs = int32(n)
920 sched.customGOMAXPROCS = true
921 } else {
922
923
924
925
926
927
928
929
930 procs = defaultGOMAXPROCS(numCPUStartup)
931 }
932 if procresize(procs) != nil {
933 throw("unknown runnable goroutine during bootstrap")
934 }
935 unlock(&sched.lock)
936
937
938 worldStarted()
939
940 if buildVersion == "" {
941
942
943 buildVersion = "unknown"
944 }
945 if len(modinfo) == 1 {
946
947
948 modinfo = ""
949 }
950 }
951
952 func dumpgstatus(gp *g) {
953 thisg := getg()
954 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
955 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
956 }
957
958
959 func checkmcount() {
960 assertLockHeld(&sched.lock)
961
962
963
964
965
966
967
968
969
970 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
971 if count > sched.maxmcount {
972 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
973 throw("thread exhaustion")
974 }
975 }
976
977
978
979
980
981 func mReserveID() int64 {
982 assertLockHeld(&sched.lock)
983
984 if sched.mnext+1 < sched.mnext {
985 throw("runtime: thread ID overflow")
986 }
987 id := sched.mnext
988 sched.mnext++
989 checkmcount()
990 return id
991 }
992
993
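// mcommoninit performs initialization common to all Ms: it assigns an ID,
// seeds the M's random state, sets up the signal stack guard, and links mp
// into allm. A pre-allocated ID may be passed as id, or omitted by passing -1.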
994 func mcommoninit(mp *m, id int64) {
995 gp := getg()
996
997
998 if gp != gp.m.g0 {
999 callers(1, mp.createstack[:])
1000 }
1001
1002 lock(&sched.lock)
1003
1004 if id >= 0 {
1005 mp.id = id
1006 } else {
1007 mp.id = mReserveID()
1008 }
1009
1010 mrandinit(mp)
1011
1012 mpreinit(mp)
1013 if mp.gsignal != nil {
1014 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1015 }
1016
1017
1018
1019 mp.alllink = allm
1020
1021
1022
1023 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1024 unlock(&sched.lock)
1025
1026
1027 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1028 mp.cgoCallers = new(cgoCallers)
1029 }
1030 mProfStackInit(mp)
1031 }
1032
1033
1034
1035
1036
1037 func mProfStackInit(mp *m) {
1038 if debug.profstackdepth == 0 {
1039
1040
1041 return
1042 }
1043 mp.profStack = makeProfStackFP()
1044 mp.mLockProfile.stack = makeProfStackFP()
1045 }
1046
1047
1048
1049
1050 func makeProfStackFP() []uintptr {
1051
1052
1053
1054
1055
1056
1057 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1058 }
1059
1060
1061
1062 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1063
1064
1065 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1066
1067 func (mp *m) becomeSpinning() {
1068 mp.spinning = true
1069 sched.nmspinning.Add(1)
1070 sched.needspinning.Store(0)
1071 }
1072
1073
1074
1075
1076
1077
1078
1079
1080 func (mp *m) snapshotAllp() []*p {
1081 mp.allpSnapshot = allp
1082 return mp.allpSnapshot
1083 }
1084
1085
1086
1087
1088
1089
1090
1091 func (mp *m) clearAllpSnapshot() {
1092 mp.allpSnapshot = nil
1093 }
1094
1095 func (mp *m) hasCgoOnStack() bool {
1096 return mp.ncgo > 0 || mp.isextra
1097 }
1098
1099 const (
1100
1101
1102 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1103
1104
1105
1106 osHasLowResClockInt = goos.IsWindows
1107
1108
1109
1110 osHasLowResClock = osHasLowResClockInt > 0
1111 )
1112
1113
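// ready marks gp ready to run: it transitions gp from _Gwaiting to _Grunnable,
// puts it on the current P's run queue, and wakes another P if one is idle.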
1114 func ready(gp *g, traceskip int, next bool) {
1115 status := readgstatus(gp)
1116
1117
1118 mp := acquirem()
1119 if status&^_Gscan != _Gwaiting {
1120 dumpgstatus(gp)
1121 throw("bad g->status in ready")
1122 }
1123
1124
1125 trace := traceAcquire()
1126 casgstatus(gp, _Gwaiting, _Grunnable)
1127 if trace.ok() {
1128 trace.GoUnpark(gp, traceskip)
1129 traceRelease(trace)
1130 }
1131 runqput(mp.p.ptr(), gp, next)
1132 wakep()
1133 releasem(mp)
1134 }
1135
1136
1137
1138 const freezeStopWait = 0x7fffffff
1139
1140
1141
1142 var freezing atomic.Bool
1143
1144
1145
1146
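// freezetheworld is similar to stopTheWorld but best-effort and can be called
// several times. There is no reverse operation; it is used while crashing.
// This function must not lock any mutexes.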
1147 func freezetheworld() {
1148 freezing.Store(true)
1149 if debug.dontfreezetheworld > 0 {
// With GODEBUG=dontfreezetheworld=1, leave the Ps (and the goroutines
// they are running) alone so the crash output can show goroutines in
// the _Grunning state, which is useful when debugging the scheduler
// itself. Just give everything a brief moment to make progress before
// tracebacks are printed.
1174 usleep(1000)
1175 return
1176 }
1177
1178
1179
1180
1181 for i := 0; i < 5; i++ {
1182
1183 sched.stopwait = freezeStopWait
1184 sched.gcwaiting.Store(true)
1185
1186 if !preemptall() {
1187 break
1188 }
1189 usleep(1000)
1190 }
1191
1192 usleep(1000)
1193 preemptall()
1194 usleep(1000)
1195 }
1196
1197
1198
1199
1200
1201 func readgstatus(gp *g) uint32 {
1202 return gp.atomicstatus.Load()
1203 }
1204
1205
1206
1207
1208
1209 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1210 success := false
1211
1212
1213 switch oldval {
1214 default:
1215 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1216 dumpgstatus(gp)
1217 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1218 case _Gscanrunnable,
1219 _Gscanwaiting,
1220 _Gscanrunning,
1221 _Gscansyscall,
1222 _Gscanleaked,
1223 _Gscanpreempted,
1224 _Gscandeadextra:
1225 if newval == oldval&^_Gscan {
1226 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1227 }
1228 }
1229 if !success {
1230 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1231 dumpgstatus(gp)
1232 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1233 }
1234 releaseLockRankAndM(lockRankGscan)
1235 }
1236
1237
1238
1239 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1240 switch oldval {
1241 case _Grunnable,
1242 _Grunning,
1243 _Gwaiting,
1244 _Gleaked,
1245 _Gsyscall,
1246 _Gdeadextra:
1247 if newval == oldval|_Gscan {
1248 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1249 if r {
1250 acquireLockRankAndM(lockRankGscan)
1251 }
1252 return r
1253
1254 }
1255 }
1256 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1257 throw("castogscanstatus")
1258 panic("not reached")
1259 }
1260
1261
1262
1263 var casgstatusAlwaysTrack = false
1264
// casgstatus atomically transitions gp from oldval to newval.
// If asked to move to or from a Gscan status this will throw; use
// castogscanstatus and casfrom_Gscanstatus instead. casgstatus will loop if
// gp->atomicstatus is in a Gscan status until the routine that put it in the
// Gscan state is finished.
1271 func casgstatus(gp *g, oldval, newval uint32) {
1272 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1273 systemstack(func() {
1274
1275
1276 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1277 throw("casgstatus: bad incoming values")
1278 })
1279 }
1280
1281 lockWithRankMayAcquire(nil, lockRankGscan)
1282
1283
1284 const yieldDelay = 5 * 1000
1285 var nextYield int64
1286
1287
1288
1289 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1290 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1291 systemstack(func() {
1292
1293
1294 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1295 })
1296 }
1297 if i == 0 {
1298 nextYield = nanotime() + yieldDelay
1299 }
1300 if nanotime() < nextYield {
1301 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1302 procyield(1)
1303 }
1304 } else {
1305 osyield()
1306 nextYield = nanotime() + yieldDelay/2
1307 }
1308 }
1309
1310 if gp.bubble != nil {
1311 systemstack(func() {
1312 gp.bubble.changegstatus(gp, oldval, newval)
1313 })
1314 }
1315
1316 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1317
1318
1319 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1320 gp.tracking = true
1321 }
1322 gp.trackingSeq++
1323 }
1324 if !gp.tracking {
1325 return
1326 }
1327
1328
1329
1330
1331
1332
1333 switch oldval {
1334 case _Grunnable:
1335
1336
1337
1338 now := nanotime()
1339 gp.runnableTime += now - gp.trackingStamp
1340 gp.trackingStamp = 0
1341 case _Gwaiting:
1342 if !gp.waitreason.isMutexWait() {
1343
1344 break
1345 }
1346
1347
1348
1349
1350
1351 now := nanotime()
1352 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1353 gp.trackingStamp = 0
1354 }
1355 switch newval {
1356 case _Gwaiting:
1357 if !gp.waitreason.isMutexWait() {
1358
1359 break
1360 }
1361
1362 now := nanotime()
1363 gp.trackingStamp = now
1364 case _Grunnable:
1365
1366
1367 now := nanotime()
1368 gp.trackingStamp = now
1369 case _Grunning:
1370
1371
1372
1373 gp.tracking = false
1374 sched.timeToRun.record(gp.runnableTime)
1375 gp.runnableTime = 0
1376 }
1377 }
1378
1379
1380
1381
1382 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1383
1384 gp.waitreason = reason
1385 casgstatus(gp, old, _Gwaiting)
1386 }
1387
1388
1389
1390
1391
1392
1393
1394
1395 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1396 if !reason.isWaitingForSuspendG() {
1397 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1398 }
1399 casGToWaiting(gp, old, reason)
1400 }
1401
1402
1403
1404
1405
1406 func casGToPreemptScan(gp *g, old, new uint32) {
1407 if old != _Grunning || new != _Gscan|_Gpreempted {
1408 throw("bad g transition")
1409 }
1410 acquireLockRankAndM(lockRankGscan)
1411 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1412 }
1413
1414
1415
1416
1417
1418
1419 }
1420
1421
1422
1423
1424 func casGFromPreempted(gp *g, old, new uint32) bool {
1425 if old != _Gpreempted || new != _Gwaiting {
1426 throw("bad g transition")
1427 }
1428 gp.waitreason = waitReasonPreempted
1429 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1430 return false
1431 }
1432 if bubble := gp.bubble; bubble != nil {
1433 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1434 }
1435 return true
1436 }
1437
1438
1439 type stwReason uint8
1440
1441
1442
1443
1444 const (
1445 stwUnknown stwReason = iota
1446 stwGCMarkTerm
1447 stwGCSweepTerm
1448 stwWriteHeapDump
1449 stwGoroutineProfile
1450 stwGoroutineProfileCleanup
1451 stwAllGoroutinesStack
1452 stwReadMemStats
1453 stwAllThreadsSyscall
1454 stwGOMAXPROCS
1455 stwStartTrace
1456 stwStopTrace
1457 stwForTestCountPagesInUse
1458 stwForTestReadMetricsSlow
1459 stwForTestReadMemStatsSlow
1460 stwForTestPageCachePagesLeaked
1461 stwForTestResetDebugLog
1462 )
1463
1464 func (r stwReason) String() string {
1465 return stwReasonStrings[r]
1466 }
1467
1468 func (r stwReason) isGC() bool {
1469 return r == stwGCMarkTerm || r == stwGCSweepTerm
1470 }
1471
1472
1473
1474
1475 var stwReasonStrings = [...]string{
1476 stwUnknown: "unknown",
1477 stwGCMarkTerm: "GC mark termination",
1478 stwGCSweepTerm: "GC sweep termination",
1479 stwWriteHeapDump: "write heap dump",
1480 stwGoroutineProfile: "goroutine profile",
1481 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1482 stwAllGoroutinesStack: "all goroutines stack trace",
1483 stwReadMemStats: "read mem stats",
1484 stwAllThreadsSyscall: "AllThreadsSyscall",
1485 stwGOMAXPROCS: "GOMAXPROCS",
1486 stwStartTrace: "start trace",
1487 stwStopTrace: "stop trace",
1488 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1489 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1490 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1491 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1492 stwForTestResetDebugLog: "ResetDebugLog (test)",
1493 }
1494
1495
1496
1497 type worldStop struct {
1498 reason stwReason
1499 startedStopping int64
1500 finishedStopping int64
1501 stoppingCPUTime int64
1502 }
1503
1504
1505
1506
1507 var stopTheWorldContext worldStop
1508
1509
// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points and recording reason as the reason for the
// stop. On return, only the current goroutine's P is running. stopTheWorld
// must not be called from a system stack and the caller must not hold
// worldsema. The caller must call startTheWorld when other P's should resume
// execution.
//
// stopTheWorld is safe for multiple goroutines to call at the same time.
// Each will execute its own stop, and the stops will be serialized.
//
// Returns the STW context, which must be passed to startTheWorld.
1526 func stopTheWorld(reason stwReason) worldStop {
1527 semacquire(&worldsema)
1528 gp := getg()
1529 gp.m.preemptoff = reason.String()
1530 systemstack(func() {
1531 stopTheWorldContext = stopTheWorldWithSema(reason)
1532 })
1533 return stopTheWorldContext
1534 }
1535
1536
1537
1538
1539 func startTheWorld(w worldStop) {
1540 systemstack(func() { startTheWorldWithSema(0, w) })
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557 mp := acquirem()
1558 mp.preemptoff = ""
1559 semrelease1(&worldsema, true, 0)
1560 releasem(mp)
1561 }
1562
1563
1564
1565
1566 func stopTheWorldGC(reason stwReason) worldStop {
1567 semacquire(&gcsema)
1568 return stopTheWorld(reason)
1569 }
1570
1571
1572
1573
1574 func startTheWorldGC(w worldStop) {
1575 startTheWorld(w)
1576 semrelease(&gcsema)
1577 }
1578
1579
1580 var worldsema uint32 = 1
1581
1582
1583
1584
1585
1586
1587
1588 var gcsema uint32 = 1
1589
1590
// stopTheWorldWithSema is the core implementation of stopTheWorld. The caller
// is responsible for acquiring worldsema and disabling preemption first and
// then should call stopTheWorldWithSema on the system stack:
//
//	semacquire(&worldsema)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo these
// three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs. Other P's are able to
// execute between successive calls. Holding worldsema causes any other
// goroutines invoking stopTheWorld to block.
1622 func stopTheWorldWithSema(reason stwReason) worldStop {
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1636
1637 trace := traceAcquire()
1638 if trace.ok() {
1639 trace.STWStart(reason)
1640 traceRelease(trace)
1641 }
1642 gp := getg()
1643
1644
1645
1646 if gp.m.locks > 0 {
1647 throw("stopTheWorld: holding locks")
1648 }
1649
1650 lock(&sched.lock)
1651 start := nanotime()
1652 sched.stopwait = gomaxprocs
1653 sched.gcwaiting.Store(true)
1654 preemptall()
1655
1656
1657 gp.m.p.ptr().status = _Pgcstop
1658 gp.m.p.ptr().gcStopTime = start
1659 sched.stopwait--
1660
1661
1662 for _, pp := range allp {
1663 if thread, ok := setBlockOnExitSyscall(pp); ok {
1664 thread.gcstopP()
1665 thread.resume()
1666 }
1667 }
1668
1669
1670 now := nanotime()
1671 for {
1672 pp, _ := pidleget(now)
1673 if pp == nil {
1674 break
1675 }
1676 pp.status = _Pgcstop
1677 pp.gcStopTime = nanotime()
1678 sched.stopwait--
1679 }
1680 wait := sched.stopwait > 0
1681 unlock(&sched.lock)
1682
1683
1684 if wait {
1685 for {
1686
1687 if notetsleep(&sched.stopnote, 100*1000) {
1688 noteclear(&sched.stopnote)
1689 break
1690 }
1691 preemptall()
1692 }
1693 }
1694
1695 finish := nanotime()
1696 startTime := finish - start
1697 if reason.isGC() {
1698 sched.stwStoppingTimeGC.record(startTime)
1699 } else {
1700 sched.stwStoppingTimeOther.record(startTime)
1701 }
1702
1703
1704
1705
1706
1707 stoppingCPUTime := int64(0)
1708 bad := ""
1709 if sched.stopwait != 0 {
1710 bad = "stopTheWorld: not stopped (stopwait != 0)"
1711 } else {
1712 for _, pp := range allp {
1713 if pp.status != _Pgcstop {
1714 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1715 }
1716 if pp.gcStopTime == 0 && bad == "" {
1717 bad = "stopTheWorld: broken CPU time accounting"
1718 }
1719 stoppingCPUTime += finish - pp.gcStopTime
1720 pp.gcStopTime = 0
1721 }
1722 }
1723 if freezing.Load() {
1724
1725
1726
1727
1728 lock(&deadlock)
1729 lock(&deadlock)
1730 }
1731 if bad != "" {
1732 throw(bad)
1733 }
1734
1735 worldStopped()
1736
1737
1738 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1739
1740 return worldStop{
1741 reason: reason,
1742 startedStopping: start,
1743 finishedStopping: finish,
1744 stoppingCPUTime: stoppingCPUTime,
1745 }
1746 }
1747
1748
1749
1750
1751
1752
1753
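// startTheWorldWithSema undoes the effects of stopTheWorldWithSema: it
// resizes and restarts the Ps, waking or creating Ms to run them, and records
// the total STW duration. now is an optional current time (0 means read the
// clock); the return value is the time at which the world restarted.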
1754 func startTheWorldWithSema(now int64, w worldStop) int64 {
1755 assertWorldStopped()
1756
1757 mp := acquirem()
1758 if netpollinited() {
1759 list, delta := netpoll(0)
1760 injectglist(&list)
1761 netpollAdjustWaiters(delta)
1762 }
1763 lock(&sched.lock)
1764
1765 procs := gomaxprocs
1766 if newprocs != 0 {
1767 procs = newprocs
1768 newprocs = 0
1769 }
1770 p1 := procresize(procs)
1771 sched.gcwaiting.Store(false)
1772 if sched.sysmonwait.Load() {
1773 sched.sysmonwait.Store(false)
1774 notewakeup(&sched.sysmonnote)
1775 }
1776 unlock(&sched.lock)
1777
1778 worldStarted()
1779
1780 for p1 != nil {
1781 p := p1
1782 p1 = p1.link.ptr()
1783 if p.m != 0 {
1784 mp := p.m.ptr()
1785 p.m = 0
1786 if mp.nextp != 0 {
1787 throw("startTheWorld: inconsistent mp->nextp")
1788 }
1789 mp.nextp.set(p)
1790 notewakeup(&mp.park)
1791 } else {
1792
1793 newm(nil, p, -1)
1794 }
1795 }
1796
1797
1798 if now == 0 {
1799 now = nanotime()
1800 }
1801 totalTime := now - w.startedStopping
1802 if w.reason.isGC() {
1803 sched.stwTotalTimeGC.record(totalTime)
1804 } else {
1805 sched.stwTotalTimeOther.record(totalTime)
1806 }
1807 trace := traceAcquire()
1808 if trace.ok() {
1809 trace.STWDone()
1810 traceRelease(trace)
1811 }
1812
1813
1814
1815
1816 wakep()
1817
1818 releasem(mp)
1819
1820 return now
1821 }
1822
1823
1824
1825 func usesLibcall() bool {
1826 switch GOOS {
1827 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1828 return true
1829 }
1830 return false
1831 }
1832
1833
1834
1835 func mStackIsSystemAllocated() bool {
1836 switch GOOS {
1837 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1838 return true
1839 }
1840 return false
1841 }
1842
1843
1844
1845 func mstart()
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856 func mstart0() {
1857 gp := getg()
1858
1859 osStack := gp.stack.lo == 0
1860 if osStack {
1861
1862
1863
1864
1865
1866
1867
1868
1869 size := gp.stack.hi
1870 if size == 0 {
1871 size = 16384 * sys.StackGuardMultiplier
1872 }
1873 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1874 gp.stack.lo = gp.stack.hi - size + 1024
1875 }
1876
1877
1878 gp.stackguard0 = gp.stack.lo + stackGuard
1879
1880
1881 gp.stackguard1 = gp.stackguard0
1882 mstart1()
1883
1884
1885 if mStackIsSystemAllocated() {
1886
1887
1888
1889 osStack = true
1890 }
1891 mexit(osStack)
1892 }
1893
1894
1895
1896
1897
1898 func mstart1() {
1899 gp := getg()
1900
1901 if gp != gp.m.g0 {
1902 throw("bad runtime·mstart")
1903 }
1904
1905
1906
1907
1908
1909
1910
1911 gp.sched.g = guintptr(unsafe.Pointer(gp))
1912 gp.sched.pc = sys.GetCallerPC()
1913 gp.sched.sp = sys.GetCallerSP()
1914
1915 asminit()
1916 minit()
1917
1918
1919
1920 if gp.m == &m0 {
1921 mstartm0()
1922 }
1923
1924 if debug.dataindependenttiming == 1 {
1925 sys.EnableDIT()
1926 }
1927
1928 if fn := gp.m.mstartfn; fn != nil {
1929 fn()
1930 }
1931
1932 if gp.m != &m0 {
1933 acquirep(gp.m.nextp.ptr())
1934 gp.m.nextp = 0
1935 }
1936 schedule()
1937 }
1938
1939
1940
1941
1942
1943
1944
1945 func mstartm0() {
1946
1947
1948
1949 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1950 cgoHasExtraM = true
1951 newextram()
1952 }
1953 initsig(false)
1954 }
1955
1956
1957
1958
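// mPark causes a thread to park itself, returning once woken.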
1959 func mPark() {
1960 gp := getg()
1961 notesleep(&gp.m.park)
1962 noteclear(&gp.m.park)
1963 }
1964
1965
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at the top
// of the thread stack. Instead, use gogo(&gp.m.g0.sched) to unwind the stack
// to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It will
// release the P before exiting.
1975 func mexit(osStack bool) {
1976 mp := getg().m
1977
1978 if mp == &m0 {
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990 handoffp(releasep())
1991 lock(&sched.lock)
1992 sched.nmfreed++
1993 checkdead()
1994 unlock(&sched.lock)
1995 mPark()
1996 throw("locked m0 woke up")
1997 }
1998
1999 sigblock(true)
2000 unminit()
2001
2002
2003 if mp.gsignal != nil {
2004 stackfree(mp.gsignal.stack)
2005 if valgrindenabled {
2006 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2007 mp.gsignal.valgrindStackID = 0
2008 }
2009
2010
2011
2012
2013 mp.gsignal = nil
2014 }
2015
2016
2017 vgetrandomDestroy(mp)
2018
2019
2020 lock(&sched.lock)
2021 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2022 if *pprev == mp {
2023 *pprev = mp.alllink
2024 goto found
2025 }
2026 }
2027 throw("m not found in allm")
2028 found:
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043 mp.freeWait.Store(freeMWait)
2044 mp.freelink = sched.freem
2045 sched.freem = mp
2046 unlock(&sched.lock)
2047
2048 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2049 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2050
2051
2052 handoffp(releasep())
2053
2054
2055
2056
2057
2058 lock(&sched.lock)
2059 sched.nmfreed++
2060 checkdead()
2061 unlock(&sched.lock)
2062
2063 if GOOS == "darwin" || GOOS == "ios" {
2064
2065
2066 if mp.signalPending.Load() != 0 {
2067 pendingPreemptSignals.Add(-1)
2068 }
2069 }
2070
2071
2072
2073 mdestroy(mp)
2074
2075 if osStack {
2076
2077 mp.freeWait.Store(freeMRef)
2078
2079
2080
2081 return
2082 }
2083
2084
2085
2086
2087
2088 exitThread(&mp.freeWait)
2089 }
2090
2091
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema. fn must not refer to any part of the
// current goroutine's stack, since the GC may move it.
2101 func forEachP(reason waitReason, fn func(*p)) {
2102 systemstack(func() {
2103 gp := getg().m.curg
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115 casGToWaitingForSuspendG(gp, _Grunning, reason)
2116 forEachPInternal(fn)
2117 casgstatus(gp, _Gwaiting, _Grunning)
2118 })
2119 }
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130 func forEachPInternal(fn func(*p)) {
2131 mp := acquirem()
2132 pp := getg().m.p.ptr()
2133
2134 lock(&sched.lock)
2135 if sched.safePointWait != 0 {
2136 throw("forEachP: sched.safePointWait != 0")
2137 }
2138 sched.safePointWait = gomaxprocs - 1
2139 sched.safePointFn = fn
2140
2141
2142 for _, p2 := range allp {
2143 if p2 != pp {
2144 atomic.Store(&p2.runSafePointFn, 1)
2145 }
2146 }
2147 preemptall()
2148
2149
2150
2151
2152
2153
2154
2155 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2156 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2157 fn(p)
2158 sched.safePointWait--
2159 }
2160 }
2161
2162 wait := sched.safePointWait > 0
2163 unlock(&sched.lock)
2164
2165
2166 fn(pp)
2167
2168
2169
2170 for _, p2 := range allp {
2171 if atomic.Load(&p2.runSafePointFn) != 1 {
2172
2173 continue
2174 }
2175 if thread, ok := setBlockOnExitSyscall(p2); ok {
2176 thread.takeP()
2177 thread.resume()
2178 handoffp(p2)
2179 }
2180 }
2181
2182
2183 if wait {
2184 for {
2185
2186
2187
2188
2189 if notetsleep(&sched.safePointNote, 100*1000) {
2190 noteclear(&sched.safePointNote)
2191 break
2192 }
2193 preemptall()
2194 }
2195 }
2196 if sched.safePointWait != 0 {
2197 throw("forEachP: not done")
2198 }
2199 for _, p2 := range allp {
2200 if p2.runSafePointFn != 0 {
2201 throw("forEachP: P did not run fn")
2202 }
2203 }
2204
2205 lock(&sched.lock)
2206 sched.safePointFn = nil
2207 unlock(&sched.lock)
2208 releasem(mp)
2209 }
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222 func runSafePointFn() {
2223 p := getg().m.p.ptr()
2224
2225
2226
2227 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2228 return
2229 }
2230 sched.safePointFn(p)
2231 lock(&sched.lock)
2232 sched.safePointWait--
2233 if sched.safePointWait == 0 {
2234 notewakeup(&sched.safePointNote)
2235 }
2236 unlock(&sched.lock)
2237 }
2238
2239
2240
2241
2242 var cgoThreadStart unsafe.Pointer
2243
2244 type cgothreadstart struct {
2245 g guintptr
2246 tls *uint64
2247 fn unsafe.Pointer
2248 }
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259 func allocm(pp *p, fn func(), id int64) *m {
2260 allocmLock.rlock()
2261
2262
2263
2264
2265 acquirem()
2266
2267 gp := getg()
2268 if gp.m.p == 0 {
2269 acquirep(pp)
2270 }
2271
2272
2273
2274 if sched.freem != nil {
2275 lock(&sched.lock)
2276 var newList *m
2277 for freem := sched.freem; freem != nil; {
2278
2279 wait := freem.freeWait.Load()
2280 if wait == freeMWait {
2281 next := freem.freelink
2282 freem.freelink = newList
2283 newList = freem
2284 freem = next
2285 continue
2286 }
2287
2288
2289
2290 if traceEnabled() || traceShuttingDown() {
2291 traceThreadDestroy(freem)
2292 }
2293
2294
2295
2296 if wait == freeMStack {
2297
2298
2299
2300 systemstack(func() {
2301 stackfree(freem.g0.stack)
2302 if valgrindenabled {
2303 valgrindDeregisterStack(freem.g0.valgrindStackID)
2304 freem.g0.valgrindStackID = 0
2305 }
2306 })
2307 }
2308 freem = freem.freelink
2309 }
2310 sched.freem = newList
2311 unlock(&sched.lock)
2312 }
2313
2314 mp := &new(mPadded).m
2315 mp.mstartfn = fn
2316 mcommoninit(mp, id)
2317
2318
2319
2320 if iscgo || mStackIsSystemAllocated() {
2321 mp.g0 = malg(-1)
2322 } else {
2323 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2324 }
2325 mp.g0.m = mp
2326
2327 if pp == gp.m.p.ptr() {
2328 releasep()
2329 }
2330
2331 releasem(gp.m)
2332 allocmLock.runlock()
2333 return mp
2334 }
2335
// needm is called when a cgo callback happens on a thread without an m
// (a thread not created by Go). In this case, needm grabs an m from the
// extra list and installs it as the current m so that the callback can run
// Go code.
//
// Because the m and g are not yet set up when needm starts, it is limited in
// what it can call: only nosplit functions, and nothing that requires a
// working m. For the same reason, the m cannot be allocated here; instead,
// needm takes one that was set aside in advance by newextram.
//
// If signal is true, the caller is a signal handler running on a C thread,
// which further restricts what the new m may be used for.
2375 func needm(signal bool) {
2376 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2377
2378
2379
2380
2381
2382
2383 writeErrStr("fatal error: cgo callback before cgo call\n")
2384 exit(1)
2385 }
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395 var sigmask sigset
2396 sigsave(&sigmask)
2397 sigblock(false)
2398
2399
2400
2401
2402 mp, last := getExtraM()
2403
2404
2405
2406
2407
2408
2409
2410
2411 mp.needextram = last
2412
2413
2414 mp.sigmask = sigmask
2415
2416
2417
2418 osSetupTLS(mp)
2419
2420
2421
2422 setg(mp.g0)
2423 sp := sys.GetCallerSP()
2424 callbackUpdateSystemStack(mp, sp, signal)
2425
2426
2427
2428
2429 mp.isExtraInC = false
2430
2431
2432 asminit()
2433 minit()
2434
2435
2436
2437
2438
2439
2440 var trace traceLocker
2441 if !signal {
2442 trace = traceAcquire()
2443 }
2444
2445
2446 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2447 sched.ngsys.Add(-1)
2448 sched.nGsyscallNoP.Add(1)
2449
2450 if !signal {
2451 if trace.ok() {
2452 trace.GoCreateSyscall(mp.curg)
2453 traceRelease(trace)
2454 }
2455 }
2456 mp.isExtraInSig = signal
2457 }
2458
2459
2460
2461
2462 func needAndBindM() {
2463 needm(false)
2464
2465 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2466 cgoBindM()
2467 }
2468 }
2469
2470
2471
2472
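// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.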
2473 func newextram() {
2474 c := extraMWaiters.Swap(0)
2475 if c > 0 {
2476 for i := uint32(0); i < c; i++ {
2477 oneNewExtraM()
2478 }
2479 } else if extraMLength.Load() == 0 {
2480
2481 oneNewExtraM()
2482 }
2483 }
2484
2485
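// oneNewExtraM allocates an m and puts it on the extra list.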
2486 func oneNewExtraM() {
2487
2488
2489
2490
2491
2492 mp := allocm(nil, nil, -1)
2493 gp := malg(4096)
2494 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2495 gp.sched.sp = gp.stack.hi
2496 gp.sched.sp -= 4 * goarch.PtrSize
2497 gp.sched.lr = 0
2498 gp.sched.g = guintptr(unsafe.Pointer(gp))
2499 gp.syscallpc = gp.sched.pc
2500 gp.syscallsp = gp.sched.sp
2501 gp.stktopsp = gp.sched.sp
2502
2503
2504
2505 casgstatus(gp, _Gidle, _Gdeadextra)
2506 gp.m = mp
2507 mp.curg = gp
2508 mp.isextra = true
2509
2510 mp.isExtraInC = true
2511 mp.lockedInt++
2512 mp.lockedg.set(gp)
2513 gp.lockedm.set(mp)
2514 gp.goid = sched.goidgen.Add(1)
2515 if raceenabled {
2516 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2517 }
2518
2519 allgadd(gp)
2520
2521
2522
2523
2524
2525 sched.ngsys.Add(1)
2526
2527
2528 addExtraM(mp)
2529 }
2530
2531
// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list so a future callback
// (possibly from a different thread) can reuse it, and restores the
// signal mask that needm saved.
2564 func dropm() {
2565
2566
2567
2568 mp := getg().m
2569
2570
2571
2572
2573
2574 var trace traceLocker
2575 if !mp.isExtraInSig {
2576 trace = traceAcquire()
2577 }
2578
2579
2580 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2581 mp.curg.preemptStop = false
2582 sched.ngsys.Add(1)
2583 sched.nGsyscallNoP.Add(-1)
2584
2585 if !mp.isExtraInSig {
2586 if trace.ok() {
2587 trace.GoDestroySyscall()
2588 traceRelease(trace)
2589 }
2590 }
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605 mp.syscalltick--
2606
2607
2608
2609 mp.curg.trace.reset()
2610
2611
2612
2613
2614 if traceEnabled() || traceShuttingDown() {
2615
2616
2617
2618
2619
2620
2621
2622 lock(&sched.lock)
2623 traceThreadDestroy(mp)
2624 unlock(&sched.lock)
2625 }
2626 mp.isExtraInSig = false
2627
2628
2629
2630
2631
2632 sigmask := mp.sigmask
2633 sigblock(false)
2634 unminit()
2635
2636 setg(nil)
2637
2638
2639
2640 g0 := mp.g0
2641 g0.stack.hi = 0
2642 g0.stack.lo = 0
2643 g0.stackguard0 = 0
2644 g0.stackguard1 = 0
2645 mp.g0StackAccurate = false
2646
2647 putExtraM(mp)
2648
2649 msigrestore(sigmask)
2650 }
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672 func cgoBindM() {
2673 if GOOS == "windows" || GOOS == "plan9" {
2674 fatal("bindm in unexpected GOOS")
2675 }
2676 g := getg()
2677 if g.m.g0 != g {
2678 fatal("the current g is not g0")
2679 }
2680 if _cgo_bindm != nil {
2681 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2682 }
2683 }
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696 func getm() uintptr {
2697 return uintptr(unsafe.Pointer(getg().m))
2698 }
2699
2700 var (
2701
2702
2703
2704
2705
2706
2707 extraM atomic.Uintptr
2708
2709 extraMLength atomic.Uint32
2710
2711 extraMWaiters atomic.Uint32
2712
2713
2714 extraMInUse atomic.Uint32
2715 )
2716
2717
2718
2719
2720
2721
2722
2723
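// lockextra locks the extra-M list and returns its head. If the list is empty
// and nilokay is false, it records itself as a waiter and spins until an M is
// added. The list must be released again with unlockextra.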
2724 func lockextra(nilokay bool) *m {
2725 const locked = 1
2726
2727 incr := false
2728 for {
2729 old := extraM.Load()
2730 if old == locked {
2731 osyield_no_g()
2732 continue
2733 }
2734 if old == 0 && !nilokay {
2735 if !incr {
2736
2737
2738
2739 extraMWaiters.Add(1)
2740 incr = true
2741 }
2742 usleep_no_g(1)
2743 continue
2744 }
2745 if extraM.CompareAndSwap(old, locked) {
2746 return (*m)(unsafe.Pointer(old))
2747 }
2748 osyield_no_g()
2749 continue
2750 }
2751 }
2752
2753
2754 func unlockextra(mp *m, delta int32) {
2755 extraMLength.Add(delta)
2756 extraM.Store(uintptr(unsafe.Pointer(mp)))
2757 }
2758
2759
2760
2761
2762
2763
2764
2765
2766 func getExtraM() (mp *m, last bool) {
2767 mp = lockextra(false)
2768 extraMInUse.Add(1)
2769 unlockextra(mp.schedlink.ptr(), -1)
2770 return mp, mp.schedlink.ptr() == nil
2771 }
2772
2773
2774
2775
2776
2777 func putExtraM(mp *m) {
2778 extraMInUse.Add(-1)
2779 addExtraM(mp)
2780 }
2781
2782
2783
2784
2785 func addExtraM(mp *m) {
2786 mnext := lockextra(true)
2787 mp.schedlink.set(mnext)
2788 unlockextra(mp, 1)
2789 }
2790
2791 var (
2792
2793
2794
2795 allocmLock rwmutex
2796
2797
2798
2799
2800 execLock rwmutex
2801 )
2802
2803
2804
2805 const (
2806 failthreadcreate = "runtime: failed to create new OS thread\n"
2807 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2808 )
2809
2810
2811
2812
2813 var newmHandoff struct {
2814 lock mutex
2815
2816
2817
2818 newm muintptr
2819
2820
2821
2822 waiting bool
2823 wake note
2824
2825
2826
2827
2828 haveTemplateThread uint32
2829 }
2830
// newm creates a new m. It will start off with a call to fn, or else the
// scheduler. fn needs to be static and not a heap allocated closure.
//
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
2838 func newm(fn func(), pp *p, id int64) {
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849 acquirem()
2850
2851 mp := allocm(pp, fn, id)
2852 mp.nextp.set(pp)
2853 mp.sigmask = initSigmask
2854 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866 lock(&newmHandoff.lock)
2867 if newmHandoff.haveTemplateThread == 0 {
2868 throw("on a locked thread with no template thread")
2869 }
2870 mp.schedlink = newmHandoff.newm
2871 newmHandoff.newm.set(mp)
2872 if newmHandoff.waiting {
2873 newmHandoff.waiting = false
2874 notewakeup(&newmHandoff.wake)
2875 }
2876 unlock(&newmHandoff.lock)
2877
2878
2879
2880 releasem(getg().m)
2881 return
2882 }
2883 newm1(mp)
2884 releasem(getg().m)
2885 }
2886
2887 func newm1(mp *m) {
2888 if iscgo {
2889 var ts cgothreadstart
2890 if _cgo_thread_start == nil {
2891 throw("_cgo_thread_start missing")
2892 }
2893 ts.g.set(mp.g0)
2894 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2895 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2896 if msanenabled {
2897 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2898 }
2899 if asanenabled {
2900 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2901 }
2902 execLock.rlock()
2903 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2904 execLock.runlock()
2905 return
2906 }
2907 execLock.rlock()
2908 newosproc(mp)
2909 execLock.runlock()
2910 }
2911
2912
2913
2914
2915
2916 func startTemplateThread() {
2917 if GOARCH == "wasm" {
2918 return
2919 }
2920
2921
2922
2923 mp := acquirem()
2924 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2925 releasem(mp)
2926 return
2927 }
2928 newm(templateThread, nil, -1)
2929 releasem(mp)
2930 }
2931
// templateThread is a thread in a known-good state that exists solely to
// start new threads in known-good states when the calling thread may not be
// in a good state.
//
// Many programs never need this, so templateThread is started lazily when we
// first enter a state that might lead to running on a thread in an unknown
// state.
//
// templateThread runs on an M without a P, so it must not have write
// barriers.
2944 func templateThread() {
2945 lock(&sched.lock)
2946 sched.nmsys++
2947 checkdead()
2948 unlock(&sched.lock)
2949
2950 for {
2951 lock(&newmHandoff.lock)
2952 for newmHandoff.newm != 0 {
2953 newm := newmHandoff.newm.ptr()
2954 newmHandoff.newm = 0
2955 unlock(&newmHandoff.lock)
2956 for newm != nil {
2957 next := newm.schedlink.ptr()
2958 newm.schedlink = 0
2959 newm1(newm)
2960 newm = next
2961 }
2962 lock(&newmHandoff.lock)
2963 }
2964 newmHandoff.waiting = true
2965 noteclear(&newmHandoff.wake)
2966 unlock(&newmHandoff.lock)
2967 notesleep(&newmHandoff.wake)
2968 }
2969 }
2970
2971
2972
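// stopm stops execution of the current m until new work is available.
// Returns with acquired P.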
2973 func stopm() {
2974 gp := getg()
2975
2976 if gp.m.locks != 0 {
2977 throw("stopm holding locks")
2978 }
2979 if gp.m.p != 0 {
2980 throw("stopm holding p")
2981 }
2982 if gp.m.spinning {
2983 throw("stopm spinning")
2984 }
2985
2986 lock(&sched.lock)
2987 mput(gp.m)
2988 unlock(&sched.lock)
2989 mPark()
2990 acquirep(gp.m.nextp.ptr())
2991 gp.m.nextp = 0
2992 }
2993
2994 func mspinning() {
2995
2996 getg().m.spinning = true
2997 }
2998
// startm schedules some M to run the p (creates an M if necessary).
// If p==nil, it tries to get an idle P, and if there is no idle P it does
// nothing.
//
// May run with m.p==nil, so write barriers are not allowed.
//
// If spinning is set, the caller has incremented nmspinning and must provide
// a P. startm will set m.spinning in the newly started M.
//
// If lockheld is set, the caller already holds sched.lock.
3016 func startm(pp *p, spinning, lockheld bool) {
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033 mp := acquirem()
3034 if !lockheld {
3035 lock(&sched.lock)
3036 }
3037 if pp == nil {
3038 if spinning {
3039
3040
3041
3042 throw("startm: P required for spinning=true")
3043 }
3044 pp, _ = pidleget(0)
3045 if pp == nil {
3046 if !lockheld {
3047 unlock(&sched.lock)
3048 }
3049 releasem(mp)
3050 return
3051 }
3052 }
3053 nmp := mget()
3054 if nmp == nil {
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069 id := mReserveID()
3070 unlock(&sched.lock)
3071
3072 var fn func()
3073 if spinning {
3074
3075 fn = mspinning
3076 }
3077 newm(fn, pp, id)
3078
3079 if lockheld {
3080 lock(&sched.lock)
3081 }
3082
3083
3084 releasem(mp)
3085 return
3086 }
3087 if !lockheld {
3088 unlock(&sched.lock)
3089 }
3090 if nmp.spinning {
3091 throw("startm: m is spinning")
3092 }
3093 if nmp.nextp != 0 {
3094 throw("startm: m has p")
3095 }
3096 if spinning && !runqempty(pp) {
3097 throw("startm: p has runnable gs")
3098 }
3099
3100 nmp.spinning = spinning
3101 nmp.nextp.set(pp)
3102 notewakeup(&nmp.park)
3103
3104
3105 releasem(mp)
3106 }
3107
3108
3109
3110
3111
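// handoffp hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.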
3112 func handoffp(pp *p) {
3113
3114
3115
3116
3117 if !runqempty(pp) || !sched.runq.empty() {
3118 startm(pp, false, false)
3119 return
3120 }
3121
3122 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3123 startm(pp, false, false)
3124 return
3125 }
3126
3127 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3128 startm(pp, false, false)
3129 return
3130 }
3131
3132
3133 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3134 sched.needspinning.Store(0)
3135 startm(pp, true, false)
3136 return
3137 }
3138 lock(&sched.lock)
3139 if sched.gcwaiting.Load() {
3140 pp.status = _Pgcstop
3141 pp.gcStopTime = nanotime()
3142 sched.stopwait--
3143 if sched.stopwait == 0 {
3144 notewakeup(&sched.stopnote)
3145 }
3146 unlock(&sched.lock)
3147 return
3148 }
3149 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3150 sched.safePointFn(pp)
3151 sched.safePointWait--
3152 if sched.safePointWait == 0 {
3153 notewakeup(&sched.safePointNote)
3154 }
3155 }
3156 if !sched.runq.empty() {
3157 unlock(&sched.lock)
3158 startm(pp, false, false)
3159 return
3160 }
3161
3162
3163 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3164 unlock(&sched.lock)
3165 startm(pp, false, false)
3166 return
3167 }
3168
3169
3170
3171 when := pp.timers.wakeTime()
3172 pidleput(pp, 0)
3173 unlock(&sched.lock)
3174
3175 if when != 0 {
3176 wakeNetPoller(when)
3177 }
3178 }
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
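// wakep tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).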
3193 func wakep() {
3194
3195
3196 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3197 return
3198 }
3199
3200
3201
3202
3203
3204
3205 mp := acquirem()
3206
3207 var pp *p
3208 lock(&sched.lock)
3209 pp, _ = pidlegetSpinning(0)
3210 if pp == nil {
3211 if sched.nmspinning.Add(-1) < 0 {
3212 throw("wakep: negative nmspinning")
3213 }
3214 unlock(&sched.lock)
3215 releasem(mp)
3216 return
3217 }
3218
3219
3220
3221
3222 unlock(&sched.lock)
3223
3224 startm(pp, true, false)
3225
3226 releasem(mp)
3227 }
3228
3229
3230
3231 func stoplockedm() {
3232 gp := getg()
3233
3234 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3235 throw("stoplockedm: inconsistent locking")
3236 }
3237 if gp.m.p != 0 {
3238
3239 pp := releasep()
3240 handoffp(pp)
3241 }
3242 incidlelocked(1)
3243
3244 mPark()
3245 status := readgstatus(gp.m.lockedg.ptr())
3246 if status&^_Gscan != _Grunnable {
3247 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3248 dumpgstatus(gp.m.lockedg.ptr())
3249 throw("stoplockedm: not runnable")
3250 }
3251 acquirep(gp.m.nextp.ptr())
3252 gp.m.nextp = 0
3253 }
3254
3255
3256
3257
3258
3259 func startlockedm(gp *g) {
3260 mp := gp.lockedm.ptr()
3261 if mp == getg().m {
3262 throw("startlockedm: locked to me")
3263 }
3264 if mp.nextp != 0 {
3265 throw("startlockedm: m has p")
3266 }
3267
3268 incidlelocked(-1)
3269 pp := releasep()
3270 mp.nextp.set(pp)
3271 notewakeup(&mp.park)
3272 stopm()
3273 }
3274
3275
3276
3277 func gcstopm() {
3278 gp := getg()
3279
3280 if !sched.gcwaiting.Load() {
3281 throw("gcstopm: not waiting for gc")
3282 }
3283 if gp.m.spinning {
3284 gp.m.spinning = false
3285
3286
3287 if sched.nmspinning.Add(-1) < 0 {
3288 throw("gcstopm: negative nmspinning")
3289 }
3290 }
3291 pp := releasep()
3292 lock(&sched.lock)
3293 pp.status = _Pgcstop
3294 pp.gcStopTime = nanotime()
3295 sched.stopwait--
3296 if sched.stopwait == 0 {
3297 notewakeup(&sched.stopnote)
3298 }
3299 unlock(&sched.lock)
3300 stopm()
3301 }
3302
// execute schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the current time
// slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.
3312 func execute(gp *g, inheritTime bool) {
3313 mp := getg().m
3314
3315 if goroutineProfile.active {
3316
3317
3318
3319 tryRecordGoroutineProfile(gp, nil, osyield)
3320 }
3321
3322
3323 mp.curg = gp
3324 gp.m = mp
3325 gp.syncSafePoint = false
3326 casgstatus(gp, _Grunnable, _Grunning)
3327 gp.waitsince = 0
3328 gp.preempt = false
3329 gp.stackguard0 = gp.stack.lo + stackGuard
3330 if !inheritTime {
3331 mp.p.ptr().schedtick++
3332 }
3333
3334
3335 hz := sched.profilehz
3336 if mp.profilehz != hz {
3337 setThreadCPUProfiler(hz)
3338 }
3339
3340 trace := traceAcquire()
3341 if trace.ok() {
3342 trace.GoStart()
3343 traceRelease(trace)
3344 }
3345
3346 gogo(&gp.sched)
3347 }
3348
// findRunnable finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
// tryWakeP indicates that the returned goroutine is not normal (GC worker,
// trace reader) so the caller should try to wake a P.
3353 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3354 mp := getg().m
3355
3356
3357
3358
3359
3360 top:
3361
3362
3363
3364 mp.clearAllpSnapshot()
3365
3366 pp := mp.p.ptr()
3367 if sched.gcwaiting.Load() {
3368 gcstopm()
3369 goto top
3370 }
3371 if pp.runSafePointFn != 0 {
3372 runSafePointFn()
3373 }
3374
3375
3376
3377
3378
3379 now, pollUntil, _ := pp.timers.check(0, nil)
3380
3381
3382 if traceEnabled() || traceShuttingDown() {
3383 gp := traceReader()
3384 if gp != nil {
3385 trace := traceAcquire()
3386 casgstatus(gp, _Gwaiting, _Grunnable)
3387 if trace.ok() {
3388 trace.GoUnpark(gp, 0)
3389 traceRelease(trace)
3390 }
3391 return gp, false, true
3392 }
3393 }
3394
3395
3396 if gcBlackenEnabled != 0 {
3397 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3398 if gp != nil {
3399 return gp, false, true
3400 }
3401 now = tnow
3402 }
3403
3404
3405
3406
3407 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3408 lock(&sched.lock)
3409 gp := globrunqget()
3410 unlock(&sched.lock)
3411 if gp != nil {
3412 return gp, false, false
3413 }
3414 }
3415
3416
3417 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3418 if gp := wakefing(); gp != nil {
3419 ready(gp, 0, true)
3420 }
3421 }
3422
3423
3424 if gcCleanups.needsWake() {
3425 gcCleanups.wake()
3426 }
3427
3428 if *cgo_yield != nil {
3429 asmcgocall(*cgo_yield, nil)
3430 }
3431
3432
3433 if gp, inheritTime := runqget(pp); gp != nil {
3434 return gp, inheritTime, false
3435 }
3436
3437
3438 if !sched.runq.empty() {
3439 lock(&sched.lock)
3440 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3441 unlock(&sched.lock)
3442 if gp != nil {
3443 if runqputbatch(pp, &q); !q.empty() {
3444 throw("Couldn't put Gs into empty local runq")
3445 }
3446 return gp, false, false
3447 }
3448 }
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3460 list, delta := netpoll(0)
3461 sched.pollingNet.Store(0)
3462 if !list.empty() {
3463 gp := list.pop()
3464 injectglist(&list)
3465 netpollAdjustWaiters(delta)
3466 trace := traceAcquire()
3467 casgstatus(gp, _Gwaiting, _Grunnable)
3468 if trace.ok() {
3469 trace.GoUnpark(gp, 0)
3470 traceRelease(trace)
3471 }
3472 return gp, false, false
3473 }
3474 }
3475
3476
3477
3478
3479
3480
3481 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3482 if !mp.spinning {
3483 mp.becomeSpinning()
3484 }
3485
3486 gp, inheritTime, tnow, w, newWork := stealWork(now)
3487 if gp != nil {
3488
3489 return gp, inheritTime, false
3490 }
3491 if newWork {
3492
3493
3494 goto top
3495 }
3496
3497 now = tnow
3498 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3499
3500 pollUntil = w
3501 }
3502 }
3503
3504
3505
3506
3507
3508 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3509 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3510 if node != nil {
3511 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3512 gp := node.gp.ptr()
3513
3514 trace := traceAcquire()
3515 casgstatus(gp, _Gwaiting, _Grunnable)
3516 if trace.ok() {
3517 trace.GoUnpark(gp, 0)
3518 traceRelease(trace)
3519 }
3520 return gp, false, false
3521 }
3522 gcController.removeIdleMarkWorker()
3523 }
3524
3525
3526
3527
3528
3529 gp, otherReady := beforeIdle(now, pollUntil)
3530 if gp != nil {
3531 trace := traceAcquire()
3532 casgstatus(gp, _Gwaiting, _Grunnable)
3533 if trace.ok() {
3534 trace.GoUnpark(gp, 0)
3535 traceRelease(trace)
3536 }
3537 return gp, false, false
3538 }
3539 if otherReady {
3540 goto top
3541 }
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551 allpSnapshot := mp.snapshotAllp()
3552
3553
3554 idlepMaskSnapshot := idlepMask
3555 timerpMaskSnapshot := timerpMask
3556
3557
3558 lock(&sched.lock)
3559 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3560 unlock(&sched.lock)
3561 goto top
3562 }
3563 if !sched.runq.empty() {
3564 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3565 unlock(&sched.lock)
3566 if gp == nil {
3567 throw("global runq empty with non-zero runqsize")
3568 }
3569 if runqputbatch(pp, &q); !q.empty() {
3570 throw("Couldn't put Gs into empty local runq")
3571 }
3572 return gp, false, false
3573 }
3574 if !mp.spinning && sched.needspinning.Load() == 1 {
3575
3576 mp.becomeSpinning()
3577 unlock(&sched.lock)
3578 goto top
3579 }
3580 if releasep() != pp {
3581 throw("findrunnable: wrong p")
3582 }
3583 now = pidleput(pp, now)
3584 unlock(&sched.lock)
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
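// Annotation (reconstructed rationale, not original text): the thread drops its
// spinning state here before parking. After nmspinning is decremented below it
// must re-check every work source (global runq, per-P runqs, idle GC work,
// timers), because another thread may have submitted work after the earlier
// checks but before the decrement, and would then see a spinning M and skip
// waking anyone.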
3622 wasSpinning := mp.spinning
3623 if mp.spinning {
3624 mp.spinning = false
3625 if sched.nmspinning.Add(-1) < 0 {
3626 throw("findrunnable: negative nmspinning")
3627 }
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640 lock(&sched.lock)
3641 if !sched.runq.empty() {
3642 pp, _ := pidlegetSpinning(0)
3643 if pp != nil {
3644 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3645 unlock(&sched.lock)
3646 if gp == nil {
3647 throw("global runq empty with non-zero runqsize")
3648 }
3649 if runqputbatch(pp, &q); !q.empty() {
3650 throw("Couldn't put Gs into empty local runq")
3651 }
3652 acquirep(pp)
3653 mp.becomeSpinning()
3654 return gp, false, false
3655 }
3656 }
3657 unlock(&sched.lock)
3658
3659 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3660 if pp != nil {
3661 acquirep(pp)
3662 mp.becomeSpinning()
3663 goto top
3664 }
3665
3666
3667 pp, gp := checkIdleGCNoP()
3668 if pp != nil {
3669 acquirep(pp)
3670 mp.becomeSpinning()
3671
3672
3673 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3674 trace := traceAcquire()
3675 casgstatus(gp, _Gwaiting, _Grunnable)
3676 if trace.ok() {
3677 trace.GoUnpark(gp, 0)
3678 traceRelease(trace)
3679 }
3680 return gp, false, false
3681 }
3682
3683
3684
3685
3686
3687
3688
3689 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3690 }
3691
3692
3693
3694
3695
3696 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3697 sched.pollUntil.Store(pollUntil)
3698 if mp.p != 0 {
3699 throw("findrunnable: netpoll with p")
3700 }
3701 if mp.spinning {
3702 throw("findrunnable: netpoll with spinning")
3703 }
3704 delay := int64(-1)
3705 if pollUntil != 0 {
3706 if now == 0 {
3707 now = nanotime()
3708 }
3709 delay = pollUntil - now
3710 if delay < 0 {
3711 delay = 0
3712 }
3713 }
3714 if faketime != 0 {
3715
3716 delay = 0
3717 }
3718 list, delta := netpoll(delay)
3719
3720 now = nanotime()
3721 sched.pollUntil.Store(0)
3722 sched.lastpoll.Store(now)
3723 if faketime != 0 && list.empty() {
3724
3725
3726 stopm()
3727 goto top
3728 }
3729 lock(&sched.lock)
3730 pp, _ := pidleget(now)
3731 unlock(&sched.lock)
3732 if pp == nil {
3733 injectglist(&list)
3734 netpollAdjustWaiters(delta)
3735 } else {
3736 acquirep(pp)
3737 if !list.empty() {
3738 gp := list.pop()
3739 injectglist(&list)
3740 netpollAdjustWaiters(delta)
3741 trace := traceAcquire()
3742 casgstatus(gp, _Gwaiting, _Grunnable)
3743 if trace.ok() {
3744 trace.GoUnpark(gp, 0)
3745 traceRelease(trace)
3746 }
3747 return gp, false, false
3748 }
3749 if wasSpinning {
3750 mp.becomeSpinning()
3751 }
3752 goto top
3753 }
3754 } else if pollUntil != 0 && netpollinited() {
3755 pollerPollUntil := sched.pollUntil.Load()
3756 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3757 netpollBreak()
3758 }
3759 }
3760 stopm()
3761 goto top
3762 }
3763
3764
3765
3766
3767
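// pollWork reports whether there is non-background work this P could be
// doing: anything on the global or local run queues, or ready network
// poller events.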
3768 func pollWork() bool {
3769 if !sched.runq.empty() {
3770 return true
3771 }
3772 p := getg().m.p.ptr()
3773 if !runqempty(p) {
3774 return true
3775 }
3776 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3777 if list, delta := netpoll(0); !list.empty() {
3778 injectglist(&list)
3779 netpollAdjustWaiters(delta)
3780 return true
3781 }
3782 }
3783 return false
3784 }
3785
3786
3787
3788
3789
3790
3791
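// stealWork attempts to steal a runnable goroutine or timer from any other P.
// It returns the stolen g (if any), whether it should inherit the current
// time slice, an updated now, the earliest timer wake time observed, and
// whether new work may have appeared (in which case the caller should
// re-check all work sources).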
3792 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3793 pp := getg().m.p.ptr()
3794
3795 ranTimer := false
3796
3797 const stealTries = 4
3798 for i := 0; i < stealTries; i++ {
3799 stealTimersOrRunNextG := i == stealTries-1
3800
3801 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3802 if sched.gcwaiting.Load() {
3803
3804 return nil, false, now, pollUntil, true
3805 }
3806 p2 := allp[enum.position()]
3807 if pp == p2 {
3808 continue
3809 }
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3825 tnow, w, ran := p2.timers.check(now, nil)
3826 now = tnow
3827 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3828 pollUntil = w
3829 }
3830 if ran {
3831
3832
3833
3834
3835
3836
3837
3838
3839 if gp, inheritTime := runqget(pp); gp != nil {
3840 return gp, inheritTime, now, pollUntil, ranTimer
3841 }
3842 ranTimer = true
3843 }
3844 }
3845
3846
3847 if !idlepMask.read(enum.position()) {
3848 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3849 return gp, false, now, pollUntil, ranTimer
3850 }
3851 }
3852 }
3853 }
3854
3855
3856
3857
3858 return nil, false, now, pollUntil, ranTimer
3859 }
3860
3861
3862
3863
3864
3865
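// checkRunqsNoP re-checks the per-P run queues using the snapshots taken
// before this M released its P. If any non-idle P has queued work, it tries
// to grab an idle P to run that work on and returns it; otherwise nil.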
3866 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3867 for id, p2 := range allpSnapshot {
3868 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3869 lock(&sched.lock)
3870 pp, _ := pidlegetSpinning(0)
3871 if pp == nil {
3872
3873 unlock(&sched.lock)
3874 return nil
3875 }
3876 unlock(&sched.lock)
3877 return pp
3878 }
3879 }
3880
3881
3882 return nil
3883 }
3884
3885
3886
3887
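// checkTimersNoP scans the timer heaps of the snapshotted Ps and folds the
// earliest timer wake time into pollUntil, returning the result.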
3888 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3889 for id, p2 := range allpSnapshot {
3890 if timerpMaskSnapshot.read(uint32(id)) {
3891 w := p2.timers.wakeTime()
3892 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3893 pollUntil = w
3894 }
3895 }
3896 }
3897
3898 return pollUntil
3899 }
3900
3901
3902
3903
3904
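// checkIdleGCNoP checks whether an idle GC mark worker is needed and, if so,
// tries to acquire both an idle P and a background mark worker g to run on
// it. It returns the pair on success, or nil, nil.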
3905 func checkIdleGCNoP() (*p, *g) {
3906
3907
3908
3909
3910
3911
3912 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3913 return nil, nil
3914 }
3915 if !gcShouldScheduleWorker(nil) {
3916 return nil, nil
3917 }
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936 lock(&sched.lock)
3937 pp, now := pidlegetSpinning(0)
3938 if pp == nil {
3939 unlock(&sched.lock)
3940 return nil, nil
3941 }
3942
3943
3944 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3945 pidleput(pp, now)
3946 unlock(&sched.lock)
3947 return nil, nil
3948 }
3949
3950 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3951 if node == nil {
3952 pidleput(pp, now)
3953 unlock(&sched.lock)
3954 gcController.removeIdleMarkWorker()
3955 return nil, nil
3956 }
3957
3958 unlock(&sched.lock)
3959
3960 return pp, node.gp.ptr()
3961 }
3962
3963
3964
3965
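// wakeNetPoller ensures the network poller will wake no later than when:
// if a thread is currently blocked in netpoll with a later (or no) deadline
// it is interrupted via netpollBreak; otherwise an idle thread is woken so
// it can enter findRunnable and service the timer.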
3966 func wakeNetPoller(when int64) {
3967 if sched.lastpoll.Load() == 0 {
3968
3969
3970
3971
3972 pollerPollUntil := sched.pollUntil.Load()
3973 if pollerPollUntil == 0 || pollerPollUntil > when {
3974 netpollBreak()
3975 }
3976 } else {
3977
3978
3979 if GOOS != "plan9" {
3980 wakep()
3981 }
3982 }
3983 }
3984
3985 func resetspinning() {
3986 gp := getg()
3987 if !gp.m.spinning {
3988 throw("resetspinning: not a spinning m")
3989 }
3990 gp.m.spinning = false
3991 nmspinning := sched.nmspinning.Add(-1)
3992 if nmspinning < 0 {
3993 throw("findrunnable: negative nmspinning")
3994 }
3995
3996
3997
3998 wakep()
3999 }
4000
4001
4002
4003
4004
4005
4006
4007
4008
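// injectglist marks every goroutine on glist runnable and queues them, then
// clears glist. If this M has no P, everything goes to the global run queue;
// otherwise one goroutine is handed to the global queue per idle P (starting
// Ms to run them), the remainder goes to the local run queue, and any
// overflow spills back to the global queue.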
4009 func injectglist(glist *gList) {
4010 if glist.empty() {
4011 return
4012 }
4013
4014
4015
4016 var tail *g
4017 trace := traceAcquire()
4018 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4019 tail = gp
4020 casgstatus(gp, _Gwaiting, _Grunnable)
4021 if trace.ok() {
4022 trace.GoUnpark(gp, 0)
4023 }
4024 }
4025 if trace.ok() {
4026 traceRelease(trace)
4027 }
4028
4029
4030 q := gQueue{glist.head, tail.guintptr(), glist.size}
4031 *glist = gList{}
4032
4033 startIdle := func(n int32) {
4034 for ; n > 0; n-- {
4035 mp := acquirem()
4036 lock(&sched.lock)
4037
4038 pp, _ := pidlegetSpinning(0)
4039 if pp == nil {
4040 unlock(&sched.lock)
4041 releasem(mp)
4042 break
4043 }
4044
4045 startm(pp, false, true)
4046 unlock(&sched.lock)
4047 releasem(mp)
4048 }
4049 }
4050
4051 pp := getg().m.p.ptr()
4052 if pp == nil {
4053 n := q.size
4054 lock(&sched.lock)
4055 globrunqputbatch(&q)
4056 unlock(&sched.lock)
4057 startIdle(n)
4058 return
4059 }
4060
4061 var globq gQueue
4062 npidle := sched.npidle.Load()
4063 for ; npidle > 0 && !q.empty(); npidle-- {
4064 g := q.pop()
4065 globq.pushBack(g)
4066 }
4067 if !globq.empty() {
4068 n := globq.size
4069 lock(&sched.lock)
4070 globrunqputbatch(&globq)
4071 unlock(&sched.lock)
4072 startIdle(n)
4073 }
4074
4075 if runqputbatch(pp, &q); !q.empty() {
4076 lock(&sched.lock)
4077 globrunqputbatch(&q)
4078 unlock(&sched.lock)
4079 }
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094 wakep()
4095 }
4096
4097
4098
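// schedule performs one round of scheduling: it finds a runnable goroutine
// and executes it on the current M. It never returns.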
4099 func schedule() {
4100 mp := getg().m
4101
4102 if mp.locks != 0 {
4103 throw("schedule: holding locks")
4104 }
4105
4106 if mp.lockedg != 0 {
4107 stoplockedm()
4108 execute(mp.lockedg.ptr(), false)
4109 }
4110
4111
4112
4113 if mp.incgo {
4114 throw("schedule: in cgo")
4115 }
4116
4117 top:
4118 pp := mp.p.ptr()
4119 pp.preempt = false
4120
4121
4122
4123
4124 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4125 throw("schedule: spinning with local work")
4126 }
4127
4128 gp, inheritTime, tryWakeP := findRunnable()
4129
4130
4131
4132
4133 mp.clearAllpSnapshot()
4134
4135 if debug.dontfreezetheworld > 0 && freezing.Load() {
4136
4137
4138
4139
4140
4141
4142
4143 lock(&deadlock)
4144 lock(&deadlock)
4145 }
4146
4147
4148
4149
4150 if mp.spinning {
4151 resetspinning()
4152 }
4153
4154 if sched.disable.user && !schedEnabled(gp) {
4155
4156
4157
4158 lock(&sched.lock)
4159 if schedEnabled(gp) {
4160
4161
4162 unlock(&sched.lock)
4163 } else {
4164 sched.disable.runnable.pushBack(gp)
4165 unlock(&sched.lock)
4166 goto top
4167 }
4168 }
4169
4170
4171
4172 if tryWakeP {
4173 wakep()
4174 }
4175 if gp.lockedm != 0 {
4176
4177
4178 startlockedm(gp)
4179 goto top
4180 }
4181
4182 execute(gp, inheritTime)
4183 }
4184
4185
4186
4187
4188
4189
4190
4191
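// dropg removes the association between the current M and its current
// goroutine (m.curg). Callers are expected to have already moved the
// goroutine's status away from _Grunning.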
4192 func dropg() {
4193 gp := getg()
4194
4195 setMNoWB(&gp.m.curg.m, nil)
4196 setGNoWB(&gp.m.curg, nil)
4197 }
4198
4199 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4200 unlock((*mutex)(lock))
4201 return true
4202 }
4203
4204
4205 func park_m(gp *g) {
4206 mp := getg().m
4207
4208 trace := traceAcquire()
4209
4210
4211
4212
4213
4214 bubble := gp.bubble
4215 if bubble != nil {
4216 bubble.incActive()
4217 }
4218
4219 if trace.ok() {
4220
4221
4222
4223 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4224 }
4225
4226
4227 casgstatus(gp, _Grunning, _Gwaiting)
4228 if trace.ok() {
4229 traceRelease(trace)
4230 }
4231
4232 dropg()
4233
4234 if fn := mp.waitunlockf; fn != nil {
4235 ok := fn(gp, mp.waitlock)
4236 mp.waitunlockf = nil
4237 mp.waitlock = nil
4238 if !ok {
4239 trace := traceAcquire()
4240 casgstatus(gp, _Gwaiting, _Grunnable)
4241 if bubble != nil {
4242 bubble.decActive()
4243 }
4244 if trace.ok() {
4245 trace.GoUnpark(gp, 2)
4246 traceRelease(trace)
4247 }
4248 execute(gp, true)
4249 }
4250 }
4251
4252 if bubble != nil {
4253 bubble.decActive()
4254 }
4255
4256 schedule()
4257 }
4258
4259 func goschedImpl(gp *g, preempted bool) {
4260 trace := traceAcquire()
4261 status := readgstatus(gp)
4262 if status&^_Gscan != _Grunning {
4263 dumpgstatus(gp)
4264 throw("bad g status")
4265 }
4266 if trace.ok() {
4267
4268
4269
4270 if preempted {
4271 trace.GoPreempt()
4272 } else {
4273 trace.GoSched()
4274 }
4275 }
4276 casgstatus(gp, _Grunning, _Grunnable)
4277 if trace.ok() {
4278 traceRelease(trace)
4279 }
4280
4281 dropg()
4282 lock(&sched.lock)
4283 globrunqput(gp)
4284 unlock(&sched.lock)
4285
4286 if mainStarted {
4287 wakep()
4288 }
4289
4290 schedule()
4291 }
4292
4293
4294 func gosched_m(gp *g) {
4295 goschedImpl(gp, false)
4296 }
4297
4298
4299 func goschedguarded_m(gp *g) {
4300 if !canPreemptM(gp.m) {
4301 gogo(&gp.sched)
4302 }
4303 goschedImpl(gp, false)
4304 }
4305
4306 func gopreempt_m(gp *g) {
4307 goschedImpl(gp, true)
4308 }
4309
4310
4311
4312
4313 func preemptPark(gp *g) {
4314 status := readgstatus(gp)
4315 if status&^_Gscan != _Grunning {
4316 dumpgstatus(gp)
4317 throw("bad g status")
4318 }
4319
4320 if gp.asyncSafePoint {
4321
4322
4323
4324 f := findfunc(gp.sched.pc)
4325 if !f.valid() {
4326 throw("preempt at unknown pc")
4327 }
4328 if f.flag&abi.FuncFlagSPWrite != 0 {
4329 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4330 throw("preempt SPWRITE")
4331 }
4332 }
4333
4334
4335
4336
4337
4338
4339
4340 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4341 dropg()
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363 trace := traceAcquire()
4364 if trace.ok() {
4365 trace.GoPark(traceBlockPreempted, 0)
4366 }
4367 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4368 if trace.ok() {
4369 traceRelease(trace)
4370 }
4371 schedule()
4372 }
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
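// goyield is like Gosched, but it puts the current goroutine on the local
// run queue of the current P instead of the global run queue.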
4388 func goyield() {
4389 checkTimeouts()
4390 mcall(goyield_m)
4391 }
4392
4393 func goyield_m(gp *g) {
4394 trace := traceAcquire()
4395 pp := gp.m.p.ptr()
4396 if trace.ok() {
4397
4398
4399
4400 trace.GoPreempt()
4401 }
4402 casgstatus(gp, _Grunning, _Grunnable)
4403 if trace.ok() {
4404 traceRelease(trace)
4405 }
4406 dropg()
4407 runqput(pp, gp, false)
4408 schedule()
4409 }
4410
4411
4412 func goexit1() {
4413 if raceenabled {
4414 if gp := getg(); gp.bubble != nil {
4415 racereleasemergeg(gp, gp.bubble.raceaddr())
4416 }
4417 racegoend()
4418 }
4419 trace := traceAcquire()
4420 if trace.ok() {
4421 trace.GoEnd()
4422 traceRelease(trace)
4423 }
4424 mcall(goexit0)
4425 }
4426
4427
4428 func goexit0(gp *g) {
4429 gdestroy(gp)
4430 schedule()
4431 }
4432
4433 func gdestroy(gp *g) {
4434 mp := getg().m
4435 pp := mp.p.ptr()
4436
4437 casgstatus(gp, _Grunning, _Gdead)
4438 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4439 if isSystemGoroutine(gp, false) {
4440 sched.ngsys.Add(-1)
4441 }
4442 gp.m = nil
4443 locked := gp.lockedm != 0
4444 gp.lockedm = 0
4445 mp.lockedg = 0
4446 gp.preemptStop = false
4447 gp.paniconfault = false
4448 gp._defer = nil
4449 gp._panic = nil
4450 gp.writebuf = nil
4451 gp.waitreason = waitReasonZero
4452 gp.param = nil
4453 gp.labels = nil
4454 gp.timer = nil
4455 gp.bubble = nil
4456
4457 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4458
4459
4460
4461 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4462 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4463 gcController.bgScanCredit.Add(scanCredit)
4464 gp.gcAssistBytes = 0
4465 }
4466
4467 dropg()
4468
4469 if GOARCH == "wasm" {
4470 gfput(pp, gp)
4471 return
4472 }
4473
4474 if locked && mp.lockedInt != 0 {
4475 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4476 if mp.isextra {
4477 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4478 }
4479 throw("exited a goroutine internally locked to the OS thread")
4480 }
4481 gfput(pp, gp)
4482 if locked {
4483
4484
4485
4486
4487
4488
4489 if GOOS != "plan9" {
4490 gogo(&mp.g0.sched)
4491 } else {
4492
4493
4494 mp.lockedExt = 0
4495 }
4496 }
4497 }
4498
4499
4500
4501
4502
4503
4504
4505
4506
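// save records pc, sp, and bp in the current goroutine's scheduler state so
// that a later gogo can resume execution there. It must not be called on a
// system g (g0 or gsignal), and the saved context (sched.ctxt) must be nil.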
4507 func save(pc, sp, bp uintptr) {
4508 gp := getg()
4509
4510 if gp == gp.m.g0 || gp == gp.m.gsignal {
4511
4512
4513
4514
4515
4516 throw("save on system g not allowed")
4517 }
4518
4519 gp.sched.pc = pc
4520 gp.sched.sp = sp
4521 gp.sched.lr = 0
4522 gp.sched.bp = bp
4523
4524
4525
4526 if gp.sched.ctxt != nil {
4527 badctxt()
4528 }
4529 }
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
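// reentersyscall is the common syscall-entry path: it records the caller's
// pc/sp/bp, moves the goroutine to _Gsyscall, and leaves the P in a state
// from which sysmon or the GC can retake it while the syscall runs. The
// saved pc/sp/bp are refreshed after every call below that might have
// clobbered them (e.g. anything run on the system stack).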
4555 func reentersyscall(pc, sp, bp uintptr) {
4556 gp := getg()
4557
4558
4559
4560 gp.m.locks++
4561
4562
4563
4564
4565
4566 gp.stackguard0 = stackPreempt
4567 gp.throwsplit = true
4568
4569
4570 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4571
4572 pp := gp.m.p.ptr()
4573 if pp.runSafePointFn != 0 {
4574
4575 systemstack(runSafePointFn)
4576 }
4577 gp.m.oldp.set(pp)
4578
4579
4580 save(pc, sp, bp)
4581 gp.syscallsp = sp
4582 gp.syscallpc = pc
4583 gp.syscallbp = bp
4584
4585
4586 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4587 systemstack(func() {
4588 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4589 throw("entersyscall")
4590 })
4591 }
4592 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4593 systemstack(func() {
4594 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4595 throw("entersyscall")
4596 })
4597 }
4598 trace := traceAcquire()
4599 if trace.ok() {
4600
4601
4602
4603
4604 systemstack(func() {
4605 trace.GoSysCall()
4606 })
4607
4608 save(pc, sp, bp)
4609 }
4610 if sched.gcwaiting.Load() {
4611
4612
4613
4614 systemstack(func() {
4615 entersyscallHandleGCWait(trace)
4616 })
4617
4618 save(pc, sp, bp)
4619 }
4620
4621
4622
4623
4624
4625 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4626 casgstatus(gp, _Grunning, _Gsyscall)
4627 }
4628 if staticLockRanking {
4629
4630 save(pc, sp, bp)
4631 }
4632 if trace.ok() {
4633
4634
4635
4636 traceRelease(trace)
4637 }
4638 if sched.sysmonwait.Load() {
4639 systemstack(entersyscallWakeSysmon)
4640
4641 save(pc, sp, bp)
4642 }
4643 gp.m.locks--
4644 }
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660 func entersyscall() {
4661
4662
4663
4664
4665 fp := getcallerfp()
4666 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4667 }
4668
4669 func entersyscallWakeSysmon() {
4670 lock(&sched.lock)
4671 if sched.sysmonwait.Load() {
4672 sched.sysmonwait.Store(false)
4673 notewakeup(&sched.sysmonnote)
4674 }
4675 unlock(&sched.lock)
4676 }
4677
4678 func entersyscallHandleGCWait(trace traceLocker) {
4679 gp := getg()
4680
4681 lock(&sched.lock)
4682 if sched.stopwait > 0 {
4683
4684 pp := gp.m.p.ptr()
4685 pp.m = 0
4686 gp.m.p = 0
4687 atomic.Store(&pp.status, _Pgcstop)
4688
4689 if trace.ok() {
4690 trace.ProcStop(pp)
4691 }
4692 sched.nGsyscallNoP.Add(1)
4693 pp.gcStopTime = nanotime()
4694 pp.syscalltick++
4695 if sched.stopwait--; sched.stopwait == 0 {
4696 notewakeup(&sched.stopnote)
4697 }
4698 }
4699 unlock(&sched.lock)
4700 }
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
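// entersyscallblock is like entersyscall, but is used for calls known to
// block for a long time: it hands the P off to another M immediately rather
// than waiting for sysmon to retake it.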
4714 func entersyscallblock() {
4715 gp := getg()
4716
4717 gp.m.locks++
4718 gp.throwsplit = true
4719 gp.stackguard0 = stackPreempt
4720 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4721 gp.m.p.ptr().syscalltick++
4722
4723 sched.nGsyscallNoP.Add(1)
4724
4725
4726 pc := sys.GetCallerPC()
4727 sp := sys.GetCallerSP()
4728 bp := getcallerfp()
4729 save(pc, sp, bp)
4730 gp.syscallsp = gp.sched.sp
4731 gp.syscallpc = gp.sched.pc
4732 gp.syscallbp = gp.sched.bp
4733 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4734 sp1 := sp
4735 sp2 := gp.sched.sp
4736 sp3 := gp.syscallsp
4737 systemstack(func() {
4738 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4739 throw("entersyscallblock")
4740 })
4741 }
4742
4743
4744
4745
4746
4747
4748 trace := traceAcquire()
4749 systemstack(func() {
4750 if trace.ok() {
4751 trace.GoSysCall()
4752 }
4753 handoffp(releasep())
4754 })
4755
4756
4757
4758 casgstatus(gp, _Grunning, _Gsyscall)
4759 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4760 systemstack(func() {
4761 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4762 throw("entersyscallblock")
4763 })
4764 }
4765 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4766 systemstack(func() {
4767 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4768 throw("entersyscallblock")
4769 })
4770 }
4771 if trace.ok() {
4772 systemstack(func() {
4773 traceRelease(trace)
4774 })
4775 }
4776
4777
4778 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4779
4780 gp.m.locks--
4781 }
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
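// exitsyscall is called when a goroutine returns from a system call. It
// tries to reattach the goroutine to a P (preferring the one it held before
// the call) and keep running; if no P can be acquired it switches to the g0
// stack via exitsyscallNoP to queue the goroutine and stop this M.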
4803 func exitsyscall() {
4804 gp := getg()
4805
4806 gp.m.locks++
4807 if sys.GetCallerSP() > gp.syscallsp {
4808 throw("exitsyscall: syscall frame is no longer valid")
4809 }
4810 gp.waitsince = 0
4811
4812 if sched.stopwait == freezeStopWait {
4813
4814
4815
4816 systemstack(func() {
4817 lock(&deadlock)
4818 lock(&deadlock)
4819 })
4820 }
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4834 casgstatus(gp, _Gsyscall, _Grunning)
4835 }
4836
4837
4838
4839
4840
4841
4842 oldp := gp.m.oldp.ptr()
4843 gp.m.oldp.set(nil)
4844
4845
4846 pp := gp.m.p.ptr()
4847 if pp != nil {
4848
4849 if trace := traceAcquire(); trace.ok() {
4850 systemstack(func() {
4851
4852
4853
4854
4855
4856
4857
4858
4859 if pp.syscalltick == gp.m.syscalltick {
4860 trace.GoSysExit(false)
4861 } else {
4862
4863
4864
4865
4866 trace.ProcSteal(pp)
4867 trace.ProcStart()
4868 trace.GoSysExit(true)
4869 trace.GoStart()
4870 }
4871 traceRelease(trace)
4872 })
4873 }
4874 } else {
4875
4876 systemstack(func() {
4877
4878 if pp := exitsyscallTryGetP(oldp); pp != nil {
4879
4880 acquirepNoTrace(pp)
4881
4882
4883 if trace := traceAcquire(); trace.ok() {
4884 trace.ProcStart()
4885 trace.GoSysExit(true)
4886 trace.GoStart()
4887 traceRelease(trace)
4888 }
4889 }
4890 })
4891 pp = gp.m.p.ptr()
4892 }
4893
4894
4895 if pp != nil {
4896 if goroutineProfile.active {
4897
4898
4899
4900 systemstack(func() {
4901 tryRecordGoroutineProfileWB(gp)
4902 })
4903 }
4904
4905
4906 pp.syscalltick++
4907
4908
4909
4910 gp.syscallsp = 0
4911 gp.m.locks--
4912 if gp.preempt {
4913
4914 gp.stackguard0 = stackPreempt
4915 } else {
4916
4917 gp.stackguard0 = gp.stack.lo + stackGuard
4918 }
4919 gp.throwsplit = false
4920
4921 if sched.disable.user && !schedEnabled(gp) {
4922
4923 Gosched()
4924 }
4925 return
4926 }
4927
4928 gp.m.locks--
4929
4930
4931 mcall(exitsyscallNoP)
4932
4933
4934
4935
4936
4937
4938
4939 gp.syscallsp = 0
4940 gp.m.p.ptr().syscalltick++
4941 gp.throwsplit = false
4942 }
4943
4944
4945
4946
4947
4948
4949
4950 func exitsyscallTryGetP(oldp *p) *p {
4951
4952 if oldp != nil {
4953 if thread, ok := setBlockOnExitSyscall(oldp); ok {
4954 thread.takeP()
4955 thread.resume()
4956 sched.nGsyscallNoP.Add(-1)
4957 return oldp
4958 }
4959 }
4960
4961
4962 if sched.pidle != 0 {
4963 lock(&sched.lock)
4964 pp, _ := pidleget(0)
4965 if pp != nil && sched.sysmonwait.Load() {
4966 sched.sysmonwait.Store(false)
4967 notewakeup(&sched.sysmonnote)
4968 }
4969 unlock(&sched.lock)
4970 if pp != nil {
4971 sched.nGsyscallNoP.Add(-1)
4972 return pp
4973 }
4974 }
4975 return nil
4976 }
4977
4978
4979
4980
4981
4982
4983
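// exitsyscallNoP is the slow path of exitsyscall, taken when the goroutine
// could not immediately reacquire a P. Running on the g0 stack, it makes the
// goroutine runnable, runs it on an idle P if one can be found, and
// otherwise puts it on the global run queue and stops this M.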
4984 func exitsyscallNoP(gp *g) {
4985 traceExitingSyscall()
4986 trace := traceAcquire()
4987 casgstatus(gp, _Grunning, _Grunnable)
4988 traceExitedSyscall()
4989 if trace.ok() {
4990
4991
4992
4993
4994 trace.GoSysExit(true)
4995 traceRelease(trace)
4996 }
4997 sched.nGsyscallNoP.Add(-1)
4998 dropg()
4999 lock(&sched.lock)
5000 var pp *p
5001 if schedEnabled(gp) {
5002 pp, _ = pidleget(0)
5003 }
5004 var locked bool
5005 if pp == nil {
5006 globrunqput(gp)
5007
5008
5009
5010
5011
5012
5013 locked = gp.lockedm != 0
5014 } else if sched.sysmonwait.Load() {
5015 sched.sysmonwait.Store(false)
5016 notewakeup(&sched.sysmonnote)
5017 }
5018 unlock(&sched.lock)
5019 if pp != nil {
5020 acquirep(pp)
5021 execute(gp, false)
5022 }
5023 if locked {
5024
5025
5026
5027
5028 stoplockedm()
5029 execute(gp, false)
5030 }
5031 stopm()
5032 schedule()
5033 }
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047 func syscall_runtime_BeforeFork() {
5048 gp := getg().m.curg
5049
5050
5051
5052
5053 gp.m.locks++
5054 sigsave(&gp.m.sigmask)
5055 sigblock(false)
5056
5057
5058
5059
5060
5061 gp.stackguard0 = stackFork
5062 }
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076 func syscall_runtime_AfterFork() {
5077 gp := getg().m.curg
5078
5079
5080 gp.stackguard0 = gp.stack.lo + stackGuard
5081
5082 msigrestore(gp.m.sigmask)
5083
5084 gp.m.locks--
5085 }
5086
5087
5088
5089 var inForkedChild bool
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110 func syscall_runtime_AfterForkInChild() {
5111
5112
5113
5114
5115 inForkedChild = true
5116
5117 clearSignalHandlers()
5118
5119
5120
5121 msigrestore(getg().m.sigmask)
5122
5123 inForkedChild = false
5124 }
5125
5126
5127
5128
5129 var pendingPreemptSignals atomic.Int32
5130
5131
5132
5133
5134 func syscall_runtime_BeforeExec() {
5135
5136 execLock.lock()
5137
5138
5139
5140 if GOOS == "darwin" || GOOS == "ios" {
5141 for pendingPreemptSignals.Load() > 0 {
5142 osyield()
5143 }
5144 }
5145 }
5146
5147
5148
5149
5150 func syscall_runtime_AfterExec() {
5151 execLock.unlock()
5152 }
5153
5154
5155 func malg(stacksize int32) *g {
5156 newg := new(g)
5157 if stacksize >= 0 {
5158 stacksize = round2(stackSystem + stacksize)
5159 systemstack(func() {
5160 newg.stack = stackalloc(uint32(stacksize))
5161 if valgrindenabled {
5162 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5163 }
5164 })
5165 newg.stackguard0 = newg.stack.lo + stackGuard
5166 newg.stackguard1 = ^uintptr(0)
5167
5168
5169 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5170 }
5171 return newg
5172 }
5173
5174
5175
5176
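// newproc creates a new goroutine running fn and places it on the current
// P's run queue. The compiler lowers go statements to calls of this
// function.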
5177 func newproc(fn *funcval) {
5178 gp := getg()
5179 pc := sys.GetCallerPC()
5180 systemstack(func() {
5181 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5182
5183 pp := getg().m.p.ptr()
5184 runqput(pp, newg, true)
5185
5186 if mainStarted {
5187 wakep()
5188 }
5189 })
5190 }
5191
5192
5193
5194
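// newproc1 allocates (or reuses) a g for fn and initializes its stack and
// scheduling state. The new goroutine is returned in _Grunnable state, or in
// _Gwaiting with the given wait reason if parked is true; the caller is
// responsible for scheduling it.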
5195 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5196 if fn == nil {
5197 fatal("go of nil func value")
5198 }
5199
5200 mp := acquirem()
5201 pp := mp.p.ptr()
5202 newg := gfget(pp)
5203 if newg == nil {
5204 newg = malg(stackMin)
5205 casgstatus(newg, _Gidle, _Gdead)
5206 allgadd(newg)
5207 }
5208 if newg.stack.hi == 0 {
5209 throw("newproc1: newg missing stack")
5210 }
5211
5212 if readgstatus(newg) != _Gdead {
5213 throw("newproc1: new g is not Gdead")
5214 }
5215
5216 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5217 totalSize = alignUp(totalSize, sys.StackAlign)
5218 sp := newg.stack.hi - totalSize
5219 if usesLR {
5220
5221 *(*uintptr)(unsafe.Pointer(sp)) = 0
5222 prepGoExitFrame(sp)
5223 }
5224 if GOARCH == "arm64" {
5225
5226 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5227 }
5228
5229 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5230 newg.sched.sp = sp
5231 newg.stktopsp = sp
5232 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5233 newg.sched.g = guintptr(unsafe.Pointer(newg))
5234 gostartcallfn(&newg.sched, fn)
5235 newg.parentGoid = callergp.goid
5236 newg.gopc = callerpc
5237 newg.ancestors = saveAncestors(callergp)
5238 newg.startpc = fn.fn
5239 newg.runningCleanups.Store(false)
5240 if isSystemGoroutine(newg, false) {
5241 sched.ngsys.Add(1)
5242 } else {
5243
5244 newg.bubble = callergp.bubble
5245 if mp.curg != nil {
5246 newg.labels = mp.curg.labels
5247 }
5248 if goroutineProfile.active {
5249
5250
5251
5252
5253
5254 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5255 }
5256 }
5257
5258 newg.trackingSeq = uint8(cheaprand())
5259 if newg.trackingSeq%gTrackingPeriod == 0 {
5260 newg.tracking = true
5261 }
5262 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5263
5264
5265
5266 trace := traceAcquire()
5267 var status uint32 = _Grunnable
5268 if parked {
5269 status = _Gwaiting
5270 newg.waitreason = waitreason
5271 }
5272 if pp.goidcache == pp.goidcacheend {
5273
5274
5275
5276 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5277 pp.goidcache -= _GoidCacheBatch - 1
5278 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5279 }
5280 newg.goid = pp.goidcache
5281 casgstatus(newg, _Gdead, status)
5282 pp.goidcache++
5283 newg.trace.reset()
5284 if trace.ok() {
5285 trace.GoCreate(newg, newg.startpc, parked)
5286 traceRelease(trace)
5287 }
5288
5289
5290 if raceenabled {
5291 newg.racectx = racegostart(callerpc)
5292 newg.raceignore = 0
5293 if newg.labels != nil {
5294
5295
5296 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5297 }
5298 }
5299 pp.goroutinesCreated++
5300 releasem(mp)
5301
5302 return newg
5303 }
5304
5305
5306
5307
5308 func saveAncestors(callergp *g) *[]ancestorInfo {
5309
5310 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5311 return nil
5312 }
5313 var callerAncestors []ancestorInfo
5314 if callergp.ancestors != nil {
5315 callerAncestors = *callergp.ancestors
5316 }
5317 n := int32(len(callerAncestors)) + 1
5318 if n > debug.tracebackancestors {
5319 n = debug.tracebackancestors
5320 }
5321 ancestors := make([]ancestorInfo, n)
5322 copy(ancestors[1:], callerAncestors)
5323
5324 var pcs [tracebackInnerFrames]uintptr
5325 npcs := gcallers(callergp, 0, pcs[:])
5326 ipcs := make([]uintptr, npcs)
5327 copy(ipcs, pcs[:])
5328 ancestors[0] = ancestorInfo{
5329 pcs: ipcs,
5330 goid: callergp.goid,
5331 gopc: callergp.gopc,
5332 }
5333
5334 ancestorsp := new([]ancestorInfo)
5335 *ancestorsp = ancestors
5336 return ancestorsp
5337 }
5338
5339
5340
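// gfput returns gp to the P's free list, discarding any stack that is not
// the standard starting size, and moves a batch of free g's to the global
// free lists once the local list grows past 64 entries.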
5341 func gfput(pp *p, gp *g) {
5342 if readgstatus(gp) != _Gdead {
5343 throw("gfput: bad status (not Gdead)")
5344 }
5345
5346 stksize := gp.stack.hi - gp.stack.lo
5347
5348 if stksize != uintptr(startingStackSize) {
5349
5350 stackfree(gp.stack)
5351 gp.stack.lo = 0
5352 gp.stack.hi = 0
5353 gp.stackguard0 = 0
5354 if valgrindenabled {
5355 valgrindDeregisterStack(gp.valgrindStackID)
5356 gp.valgrindStackID = 0
5357 }
5358 }
5359
5360 pp.gFree.push(gp)
5361 if pp.gFree.size >= 64 {
5362 var (
5363 stackQ gQueue
5364 noStackQ gQueue
5365 )
5366 for pp.gFree.size >= 32 {
5367 gp := pp.gFree.pop()
5368 if gp.stack.lo == 0 {
5369 noStackQ.push(gp)
5370 } else {
5371 stackQ.push(gp)
5372 }
5373 }
5374 lock(&sched.gFree.lock)
5375 sched.gFree.noStack.pushAll(noStackQ)
5376 sched.gFree.stack.pushAll(stackQ)
5377 unlock(&sched.gFree.lock)
5378 }
5379 }
5380
5381
5382
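// gfget takes a g off the P's free list, refilling the local list from the
// global free lists if necessary, and ensures the returned g has a stack of
// startingStackSize. It returns nil if no free g is available.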
5383 func gfget(pp *p) *g {
5384 retry:
5385 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5386 lock(&sched.gFree.lock)
5387
5388 for pp.gFree.size < 32 {
5389
5390 gp := sched.gFree.stack.pop()
5391 if gp == nil {
5392 gp = sched.gFree.noStack.pop()
5393 if gp == nil {
5394 break
5395 }
5396 }
5397 pp.gFree.push(gp)
5398 }
5399 unlock(&sched.gFree.lock)
5400 goto retry
5401 }
5402 gp := pp.gFree.pop()
5403 if gp == nil {
5404 return nil
5405 }
5406 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5407
5408
5409
5410 systemstack(func() {
5411 stackfree(gp.stack)
5412 gp.stack.lo = 0
5413 gp.stack.hi = 0
5414 gp.stackguard0 = 0
5415 if valgrindenabled {
5416 valgrindDeregisterStack(gp.valgrindStackID)
5417 gp.valgrindStackID = 0
5418 }
5419 })
5420 }
5421 if gp.stack.lo == 0 {
5422
5423 systemstack(func() {
5424 gp.stack = stackalloc(startingStackSize)
5425 if valgrindenabled {
5426 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5427 }
5428 })
5429 gp.stackguard0 = gp.stack.lo + stackGuard
5430 } else {
5431 if raceenabled {
5432 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5433 }
5434 if msanenabled {
5435 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5436 }
5437 if asanenabled {
5438 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5439 }
5440 }
5441 return gp
5442 }
5443
5444
5445 func gfpurge(pp *p) {
5446 var (
5447 stackQ gQueue
5448 noStackQ gQueue
5449 )
5450 for !pp.gFree.empty() {
5451 gp := pp.gFree.pop()
5452 if gp.stack.lo == 0 {
5453 noStackQ.push(gp)
5454 } else {
5455 stackQ.push(gp)
5456 }
5457 }
5458 lock(&sched.gFree.lock)
5459 sched.gFree.noStack.pushAll(noStackQ)
5460 sched.gFree.stack.pushAll(stackQ)
5461 unlock(&sched.gFree.lock)
5462 }
5463
5464
5465 func Breakpoint() {
5466 breakpoint()
5467 }
5468
5469
5470
5471
5472
5473
5474 func dolockOSThread() {
5475 if GOARCH == "wasm" {
5476 return
5477 }
5478 gp := getg()
5479 gp.m.lockedg.set(gp)
5480 gp.lockedm.set(gp.m)
5481 }
5482
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499 func LockOSThread() {
5500 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5501
5502
5503
5504 startTemplateThread()
5505 }
5506 gp := getg()
5507 gp.m.lockedExt++
5508 if gp.m.lockedExt == 0 {
5509 gp.m.lockedExt--
5510 panic("LockOSThread nesting overflow")
5511 }
5512 dolockOSThread()
5513 }
5514
5515
5516 func lockOSThread() {
5517 getg().m.lockedInt++
5518 dolockOSThread()
5519 }
5520
5521
5522
5523
5524
5525
5526 func dounlockOSThread() {
5527 if GOARCH == "wasm" {
5528 return
5529 }
5530 gp := getg()
5531 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5532 return
5533 }
5534 gp.m.lockedg = 0
5535 gp.lockedm = 0
5536 }
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552 func UnlockOSThread() {
5553 gp := getg()
5554 if gp.m.lockedExt == 0 {
5555 return
5556 }
5557 gp.m.lockedExt--
5558 dounlockOSThread()
5559 }
5560
5561
5562 func unlockOSThread() {
5563 gp := getg()
5564 if gp.m.lockedInt == 0 {
5565 systemstack(badunlockosthread)
5566 }
5567 gp.m.lockedInt--
5568 dounlockOSThread()
5569 }
5570
5571 func badunlockosthread() {
5572 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5573 }
5574
5575 func gcount(includeSys bool) int32 {
5576 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5577 if !includeSys {
5578 n -= sched.ngsys.Load()
5579 }
5580 for _, pp := range allp {
5581 n -= pp.gFree.size
5582 }
5583
5584
5585
5586 if n < 1 {
5587 n = 1
5588 }
5589 return n
5590 }
5591
5592
5593
5594
5595
5596 func goroutineleakcount() int {
5597 return work.goroutineLeak.count
5598 }
5599
5600 func mcount() int32 {
5601 return int32(sched.mnext - sched.nmfreed)
5602 }
5603
5604 var prof struct {
5605 signalLock atomic.Uint32
5606
5607
5608
5609 hz atomic.Int32
5610 }
5611
5612 func _System() { _System() }
5613 func _ExternalCode() { _ExternalCode() }
5614 func _LostExternalCode() { _LostExternalCode() }
5615 func _GC() { _GC() }
5616 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5617 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5618 func _VDSO() { _VDSO() }
5619
5620
5621
5622
5623
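// sigprof is called on each CPU profiling tick (e.g. from the SIGPROF
// handler). It captures a call stack for the interrupted goroutine, falling
// back to cgo, libcall, or VDSO state when execution is outside normal Go
// code, and records the sample with the CPU profiler and the tracer.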
5624 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5625 if prof.hz.Load() == 0 {
5626 return
5627 }
5628
5629
5630
5631
5632 if mp != nil && mp.profilehz == 0 {
5633 return
5634 }
5635
5636
5637
5638
5639
5640
5641
5642 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5643 if f := findfunc(pc); f.valid() {
5644 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5645 cpuprof.lostAtomic++
5646 return
5647 }
5648 }
5649 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5650
5651
5652
5653 cpuprof.lostAtomic++
5654 return
5655 }
5656 }
5657
5658
5659
5660
5661
5662
5663
5664 getg().m.mallocing++
5665
5666 var u unwinder
5667 var stk [maxCPUProfStack]uintptr
5668 n := 0
5669 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5670 cgoOff := 0
5671
5672
5673
5674
5675
5676 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5677 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5678 cgoOff++
5679 }
5680 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5681 mp.cgoCallers[0] = 0
5682 }
5683
5684
5685 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5686 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5687
5688
5689 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5690 } else if mp != nil && mp.vdsoSP != 0 {
5691
5692
5693 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5694 } else {
5695 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5696 }
5697 n += tracebackPCs(&u, 0, stk[n:])
5698
5699 if n <= 0 {
5700
5701
5702 n = 2
5703 if inVDSOPage(pc) {
5704 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5705 } else if pc > firstmoduledata.etext {
5706
5707 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5708 }
5709 stk[0] = pc
5710 if mp.preemptoff != "" {
5711 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5712 } else {
5713 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5714 }
5715 }
5716
5717 if prof.hz.Load() != 0 {
5718
5719
5720
5721 var tagPtr *unsafe.Pointer
5722 if gp != nil && gp.m != nil && gp.m.curg != nil {
5723 tagPtr = &gp.m.curg.labels
5724 }
5725 cpuprof.add(tagPtr, stk[:n])
5726
5727 gprof := gp
5728 var mp *m
5729 var pp *p
5730 if gp != nil && gp.m != nil {
5731 if gp.m.curg != nil {
5732 gprof = gp.m.curg
5733 }
5734 mp = gp.m
5735 pp = gp.m.p.ptr()
5736 }
5737 traceCPUSample(gprof, mp, pp, stk[:n])
5738 }
5739 getg().m.mallocing--
5740 }
5741
5742
5743
5744 func setcpuprofilerate(hz int32) {
5745
5746 if hz < 0 {
5747 hz = 0
5748 }
5749
5750
5751
5752 gp := getg()
5753 gp.m.locks++
5754
5755
5756
5757
5758 setThreadCPUProfiler(0)
5759
5760 for !prof.signalLock.CompareAndSwap(0, 1) {
5761 osyield()
5762 }
5763 if prof.hz.Load() != hz {
5764 setProcessCPUProfiler(hz)
5765 prof.hz.Store(hz)
5766 }
5767 prof.signalLock.Store(0)
5768
5769 lock(&sched.lock)
5770 sched.profilehz = hz
5771 unlock(&sched.lock)
5772
5773 if hz != 0 {
5774 setThreadCPUProfiler(hz)
5775 }
5776
5777 gp.m.locks--
5778 }
5779
5780
5781
5782 func (pp *p) init(id int32) {
5783 pp.id = id
5784 pp.gcw.id = id
5785 pp.status = _Pgcstop
5786 pp.sudogcache = pp.sudogbuf[:0]
5787 pp.deferpool = pp.deferpoolbuf[:0]
5788 pp.wbBuf.reset()
5789 if pp.mcache == nil {
5790 if id == 0 {
5791 if mcache0 == nil {
5792 throw("missing mcache?")
5793 }
5794
5795
5796 pp.mcache = mcache0
5797 } else {
5798 pp.mcache = allocmcache()
5799 }
5800 }
5801 if raceenabled && pp.raceprocctx == 0 {
5802 if id == 0 {
5803 pp.raceprocctx = raceprocctx0
5804 raceprocctx0 = 0
5805 } else {
5806 pp.raceprocctx = raceproccreate()
5807 }
5808 }
5809 lockInit(&pp.timers.mu, lockRankTimers)
5810
5811
5812
5813 timerpMask.set(id)
5814
5815
5816 idlepMask.clear(id)
5817 }
5818
5819
5820
5821
5822
5823 func (pp *p) destroy() {
5824 assertLockHeld(&sched.lock)
5825 assertWorldStopped()
5826
5827
5828 for pp.runqhead != pp.runqtail {
5829
5830 pp.runqtail--
5831 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5832
5833 globrunqputhead(gp)
5834 }
5835 if pp.runnext != 0 {
5836 globrunqputhead(pp.runnext.ptr())
5837 pp.runnext = 0
5838 }
5839
5840
5841 getg().m.p.ptr().timers.take(&pp.timers)
5842
5843
5844
5845 if phase := gcphase; phase != _GCoff {
5846 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5847 throw("P destroyed while GC is running")
5848 }
5849
5850 pp.gcw.spanq.destroy()
5851
5852 clear(pp.sudogbuf[:])
5853 pp.sudogcache = pp.sudogbuf[:0]
5854 pp.pinnerCache = nil
5855 clear(pp.deferpoolbuf[:])
5856 pp.deferpool = pp.deferpoolbuf[:0]
5857 systemstack(func() {
5858 for i := 0; i < pp.mspancache.len; i++ {
5859
5860 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5861 }
5862 pp.mspancache.len = 0
5863 lock(&mheap_.lock)
5864 pp.pcache.flush(&mheap_.pages)
5865 unlock(&mheap_.lock)
5866 })
5867 freemcache(pp.mcache)
5868 pp.mcache = nil
5869 gfpurge(pp)
5870 if raceenabled {
5871 if pp.timers.raceCtx != 0 {
5872
5873
5874
5875
5876
5877 mp := getg().m
5878 phold := mp.p.ptr()
5879 mp.p.set(pp)
5880
5881 racectxend(pp.timers.raceCtx)
5882 pp.timers.raceCtx = 0
5883
5884 mp.p.set(phold)
5885 }
5886 raceprocdestroy(pp.raceprocctx)
5887 pp.raceprocctx = 0
5888 }
5889 pp.gcAssistTime = 0
5890 gcCleanups.queued += pp.cleanupsQueued
5891 pp.cleanupsQueued = 0
5892 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5893 pp.goroutinesCreated = 0
5894 pp.xRegs.free()
5895 pp.status = _Pdead
5896 }
5897
5898
5899
5900
5901
5902
5903
5904
5905
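// procresize changes the number of processors to nprocs, growing or
// shrinking allp and the per-P masks as needed. sched.lock must be held and
// the world must be stopped. It returns the list of Ps that have local work
// and therefore need to be started by the caller.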
5906 func procresize(nprocs int32) *p {
5907 assertLockHeld(&sched.lock)
5908 assertWorldStopped()
5909
5910 old := gomaxprocs
5911 if old < 0 || nprocs <= 0 {
5912 throw("procresize: invalid arg")
5913 }
5914 trace := traceAcquire()
5915 if trace.ok() {
5916 trace.Gomaxprocs(nprocs)
5917 traceRelease(trace)
5918 }
5919
5920
5921 now := nanotime()
5922 if sched.procresizetime != 0 {
5923 sched.totaltime += int64(old) * (now - sched.procresizetime)
5924 }
5925 sched.procresizetime = now
5926
5927
5928 if nprocs > int32(len(allp)) {
5929
5930
5931 lock(&allpLock)
5932 if nprocs <= int32(cap(allp)) {
5933 allp = allp[:nprocs]
5934 } else {
5935 nallp := make([]*p, nprocs)
5936
5937
5938 copy(nallp, allp[:cap(allp)])
5939 allp = nallp
5940 }
5941
5942 idlepMask = idlepMask.resize(nprocs)
5943 timerpMask = timerpMask.resize(nprocs)
5944 work.spanqMask = work.spanqMask.resize(nprocs)
5945 unlock(&allpLock)
5946 }
5947
5948
5949 for i := old; i < nprocs; i++ {
5950 pp := allp[i]
5951 if pp == nil {
5952 pp = new(p)
5953 }
5954 pp.init(i)
5955 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5956 }
5957
5958 gp := getg()
5959 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5960
5961 gp.m.p.ptr().status = _Prunning
5962 gp.m.p.ptr().mcache.prepareForSweep()
5963 } else {
5964
5965
5966
5967
5968
5969 if gp.m.p != 0 {
5970 trace := traceAcquire()
5971 if trace.ok() {
5972
5973
5974
5975 trace.GoSched()
5976 trace.ProcStop(gp.m.p.ptr())
5977 traceRelease(trace)
5978 }
5979 gp.m.p.ptr().m = 0
5980 }
5981 gp.m.p = 0
5982 pp := allp[0]
5983 pp.m = 0
5984 pp.status = _Pidle
5985 acquirep(pp)
5986 trace := traceAcquire()
5987 if trace.ok() {
5988 trace.GoStart()
5989 traceRelease(trace)
5990 }
5991 }
5992
5993
5994 mcache0 = nil
5995
5996
5997 for i := nprocs; i < old; i++ {
5998 pp := allp[i]
5999 pp.destroy()
6000
6001 }
6002
6003
6004 if int32(len(allp)) != nprocs {
6005 lock(&allpLock)
6006 allp = allp[:nprocs]
6007 idlepMask = idlepMask.resize(nprocs)
6008 timerpMask = timerpMask.resize(nprocs)
6009 work.spanqMask = work.spanqMask.resize(nprocs)
6010 unlock(&allpLock)
6011 }
6012
6013 var runnablePs *p
6014 for i := nprocs - 1; i >= 0; i-- {
6015 pp := allp[i]
6016 if gp.m.p.ptr() == pp {
6017 continue
6018 }
6019 pp.status = _Pidle
6020 if runqempty(pp) {
6021 pidleput(pp, now)
6022 } else {
6023 pp.m.set(mget())
6024 pp.link.set(runnablePs)
6025 runnablePs = pp
6026 }
6027 }
6028 stealOrder.reset(uint32(nprocs))
6029 var int32p *int32 = &gomaxprocs
6030 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6031 if old != nprocs {
6032
6033 gcCPULimiter.resetCapacity(now, nprocs)
6034 }
6035 return runnablePs
6036 }
6037
6038
6039
6040
6041
6042
6043
6044 func acquirep(pp *p) {
6045
6046 acquirepNoTrace(pp)
6047
6048
6049 trace := traceAcquire()
6050 if trace.ok() {
6051 trace.ProcStart()
6052 traceRelease(trace)
6053 }
6054 }
6055
6056
6057
6058
6059 func acquirepNoTrace(pp *p) {
6060
6061 wirep(pp)
6062
6063
6064
6065
6066
6067 pp.mcache.prepareForSweep()
6068 }
6069
6070
6071
6072
6073
6074
6075
6076 func wirep(pp *p) {
6077 gp := getg()
6078
6079 if gp.m.p != 0 {
6080
6081
6082 systemstack(func() {
6083 throw("wirep: already in go")
6084 })
6085 }
6086 if pp.m != 0 || pp.status != _Pidle {
6087
6088
6089 systemstack(func() {
6090 id := int64(0)
6091 if pp.m != 0 {
6092 id = pp.m.ptr().id
6093 }
6094 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6095 throw("wirep: invalid p state")
6096 })
6097 }
6098 gp.m.p.set(pp)
6099 pp.m.set(gp.m)
6100 pp.status = _Prunning
6101 }
6102
6103
6104 func releasep() *p {
6105 trace := traceAcquire()
6106 if trace.ok() {
6107 trace.ProcStop(getg().m.p.ptr())
6108 traceRelease(trace)
6109 }
6110 return releasepNoTrace()
6111 }
6112
6113
6114 func releasepNoTrace() *p {
6115 gp := getg()
6116
6117 if gp.m.p == 0 {
6118 throw("releasep: invalid arg")
6119 }
6120 pp := gp.m.p.ptr()
6121 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6122 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6123 throw("releasep: invalid p state")
6124 }
6125 gp.m.p = 0
6126 pp.m = 0
6127 pp.status = _Pidle
6128 return pp
6129 }
6130
6131 func incidlelocked(v int32) {
6132 lock(&sched.lock)
6133 sched.nmidlelocked += v
6134 if v > 0 {
6135 checkdead()
6136 }
6137 unlock(&sched.lock)
6138 }
6139
6140
6141
6142
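// checkdead detects deadlock: if no goroutine can ever run again, it reports
// the condition fatally. It must be called with sched.lock held.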
6143 func checkdead() {
6144 assertLockHeld(&sched.lock)
6145
6146
6147
6148
6149
6150
6151 if (islibrary || isarchive) && GOARCH != "wasm" {
6152 return
6153 }
6154
6155
6156
6157
6158
6159 if panicking.Load() > 0 {
6160 return
6161 }
6162
6163
6164
6165
6166
6167 var run0 int32
6168 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6169 run0 = 1
6170 }
6171
6172 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6173 if run > run0 {
6174 return
6175 }
6176 if run < 0 {
6177 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6178 unlock(&sched.lock)
6179 throw("checkdead: inconsistent counts")
6180 }
6181
6182 grunning := 0
6183 forEachG(func(gp *g) {
6184 if isSystemGoroutine(gp, false) {
6185 return
6186 }
6187 s := readgstatus(gp)
6188 switch s &^ _Gscan {
6189 case _Gwaiting,
6190 _Gpreempted:
6191 grunning++
6192 case _Grunnable,
6193 _Grunning,
6194 _Gsyscall:
6195 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6196 unlock(&sched.lock)
6197 throw("checkdead: runnable g")
6198 }
6199 })
6200 if grunning == 0 {
6201 unlock(&sched.lock)
6202 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6203 }
6204
6205
6206 if faketime != 0 {
6207 if when := timeSleepUntil(); when < maxWhen {
6208 faketime = when
6209
6210
6211 pp, _ := pidleget(faketime)
6212 if pp == nil {
6213
6214
6215 unlock(&sched.lock)
6216 throw("checkdead: no p for timer")
6217 }
6218 mp := mget()
6219 if mp == nil {
6220
6221
6222 unlock(&sched.lock)
6223 throw("checkdead: no m for timer")
6224 }
6225
6226
6227
6228 sched.nmspinning.Add(1)
6229 mp.spinning = true
6230 mp.nextp.set(pp)
6231 notewakeup(&mp.park)
6232 return
6233 }
6234 }
6235
6236
6237 for _, pp := range allp {
6238 if len(pp.timers.heap) > 0 {
6239 return
6240 }
6241 }
6242
6243 unlock(&sched.lock)
6244 fatal("all goroutines are asleep - deadlock!")
6245 }
6246
6247
6248
6249
6250
6251
6252 var forcegcperiod int64 = 2 * 60 * 1e9
6253
6254
6255
6256
6257 const haveSysmon = GOARCH != "wasm"
6258
6259
6260
6261
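// sysmon is the system monitor, which runs on a dedicated M without a P. In
// a loop with adaptive sleeping it polls the network when the poller has
// been idle, retakes Ps that are stuck in syscalls or running too long,
// wakes the scavenger and forced-GC goroutines when needed, and emits
// scheduler traces.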
6262 func sysmon() {
6263 lock(&sched.lock)
6264 sched.nmsys++
6265 checkdead()
6266 unlock(&sched.lock)
6267
6268 lastgomaxprocs := int64(0)
6269 lasttrace := int64(0)
6270 idle := 0
6271 delay := uint32(0)
6272
6273 for {
6274 if idle == 0 {
6275 delay = 20
6276 } else if idle > 50 {
6277 delay *= 2
6278 }
6279 if delay > 10*1000 {
6280 delay = 10 * 1000
6281 }
6282 usleep(delay)
6283
6284
6285
6286
6287
6288
6289
6290
6291
6292
6293
6294
6295
6296
6297
6298
6299 now := nanotime()
6300 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6301 lock(&sched.lock)
6302 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6303 syscallWake := false
6304 next := timeSleepUntil()
6305 if next > now {
6306 sched.sysmonwait.Store(true)
6307 unlock(&sched.lock)
6308
6309
6310 sleep := forcegcperiod / 2
6311 if next-now < sleep {
6312 sleep = next - now
6313 }
6314 shouldRelax := sleep >= osRelaxMinNS
6315 if shouldRelax {
6316 osRelax(true)
6317 }
6318 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6319 if shouldRelax {
6320 osRelax(false)
6321 }
6322 lock(&sched.lock)
6323 sched.sysmonwait.Store(false)
6324 noteclear(&sched.sysmonnote)
6325 }
6326 if syscallWake {
6327 idle = 0
6328 delay = 20
6329 }
6330 }
6331 unlock(&sched.lock)
6332 }
6333
6334 lock(&sched.sysmonlock)
6335
6336
6337 now = nanotime()
6338
6339
6340 if *cgo_yield != nil {
6341 asmcgocall(*cgo_yield, nil)
6342 }
6343
6344 lastpoll := sched.lastpoll.Load()
6345 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6346 sched.lastpoll.CompareAndSwap(lastpoll, now)
6347 list, delta := netpoll(0)
6348 if !list.empty() {
6349
6350
6351
6352
6353
6354
6355
6356 incidlelocked(-1)
6357 injectglist(&list)
6358 incidlelocked(1)
6359 netpollAdjustWaiters(delta)
6360 }
6361 }
6362
6363 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6364 sysmonUpdateGOMAXPROCS()
6365 lastgomaxprocs = now
6366 }
6367 if scavenger.sysmonWake.Load() != 0 {
6368
6369 scavenger.wake()
6370 }
6371
6372
6373 if retake(now) != 0 {
6374 idle = 0
6375 } else {
6376 idle++
6377 }
6378
6379 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6380 lock(&forcegc.lock)
6381 forcegc.idle.Store(false)
6382 var list gList
6383 list.push(forcegc.g)
6384 injectglist(&list)
6385 unlock(&forcegc.lock)
6386 }
6387 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6388 lasttrace = now
6389 schedtrace(debug.scheddetail > 0)
6390 }
6391 unlock(&sched.sysmonlock)
6392 }
6393 }
6394
6395 type sysmontick struct {
6396 schedtick uint32
6397 syscalltick uint32
6398 schedwhen int64
6399 syscallwhen int64
6400 }
6401
6402
6403
6404 const forcePreemptNS = 10 * 1000 * 1000
6405
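// retake preempts Ps whose goroutines have been running longer than
// forcePreemptNS and takes Ps away from threads that have been blocked in a
// syscall long enough that other work is waiting. It returns the number of
// Ps retaken from syscalls.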
6406 func retake(now int64) uint32 {
6407 n := 0
6408
6409
6410 lock(&allpLock)
6411
6412
6413
6414 for i := 0; i < len(allp); i++ {
6415
6416
6417
6418
6419
6420
6421
6422
6423 pp := allp[i]
6424 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6425
6426
6427 continue
6428 }
6429 pd := &pp.sysmontick
6430 sysretake := false
6431
6432
6433
6434
6435
6436 schedt := int64(pp.schedtick)
6437 if int64(pd.schedtick) != schedt {
6438 pd.schedtick = uint32(schedt)
6439 pd.schedwhen = now
6440 } else if pd.schedwhen+forcePreemptNS <= now {
6441 preemptone(pp)
6442
6443
6444
6445
6446 sysretake = true
6447 }
6448
6449
6450 unlock(&allpLock)
6451
6452
6453
6454
6455
6456
6457
6458
6459 incidlelocked(-1)
6460
6461
6462 thread, ok := setBlockOnExitSyscall(pp)
6463 if !ok {
6464
6465 goto done
6466 }
6467
6468
6469 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6470 pd.syscalltick = uint32(syst)
6471 pd.syscallwhen = now
6472 thread.resume()
6473 goto done
6474 }
6475
6476
6477
6478
6479 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6480 thread.resume()
6481 goto done
6482 }
6483
6484
6485
6486 thread.takeP()
6487 thread.resume()
6488 n++
6489
6490
6491 handoffp(pp)
6492
6493
6494
6495 done:
6496 incidlelocked(1)
6497 lock(&allpLock)
6498 }
6499 unlock(&allpLock)
6500 return uint32(n)
6501 }
6502
6503
6504
6505 type syscallingThread struct {
6506 gp *g
6507 mp *m
6508 pp *p
6509 status uint32
6510 }
6511
6512
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
6525
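// setBlockOnExitSyscall attempts to pin a thread that is executing a syscall
// on pp by setting the _Gscan bit on its goroutine's status, which prevents
// exitsyscall from completing. On success it returns a handle whose takeP or
// gcstopP methods release the P and whose resume method clears the scan bit.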
6526 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6527 if pp.status != _Prunning {
6528 return syscallingThread{}, false
6529 }
6530
6531
6532
6533
6534
6535
6536
6537
6538
6539
6540
6541 mp := pp.m.ptr()
6542 if mp == nil {
6543
6544 return syscallingThread{}, false
6545 }
6546 gp := mp.curg
6547 if gp == nil {
6548
6549 return syscallingThread{}, false
6550 }
6551 status := readgstatus(gp) &^ _Gscan
6552
6553
6554
6555
6556 if status != _Gsyscall && status != _Gdeadextra {
6557
6558 return syscallingThread{}, false
6559 }
6560 if !castogscanstatus(gp, status, status|_Gscan) {
6561
6562 return syscallingThread{}, false
6563 }
6564 if gp.m != mp || gp.m.p.ptr() != pp {
6565
6566 casfrom_Gscanstatus(gp, status|_Gscan, status)
6567 return syscallingThread{}, false
6568 }
6569 return syscallingThread{gp, mp, pp, status}, true
6570 }
6571
6572
6573
6574
6575
6576 func (s syscallingThread) gcstopP() {
6577 assertLockHeld(&sched.lock)
6578
6579 s.releaseP(_Pgcstop)
6580 s.pp.gcStopTime = nanotime()
6581 sched.stopwait--
6582 }
6583
6584
6585
6586 func (s syscallingThread) takeP() {
6587 s.releaseP(_Pidle)
6588 }
6589
6590
6591
6592
6593 func (s syscallingThread) releaseP(state uint32) {
6594 if state != _Pidle && state != _Pgcstop {
6595 throw("attempted to release P into a bad state")
6596 }
6597 trace := traceAcquire()
6598 s.pp.m = 0
6599 s.mp.p = 0
6600 atomic.Store(&s.pp.status, state)
6601 if trace.ok() {
6602 trace.ProcSteal(s.pp)
6603 traceRelease(trace)
6604 }
6605 sched.nGsyscallNoP.Add(1)
6606 s.pp.syscalltick++
6607 }
6608
6609
6610 func (s syscallingThread) resume() {
6611 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6612 }
6613
6614
6615
6616
6617
6618
6619 func preemptall() bool {
6620 res := false
6621 for _, pp := range allp {
6622 if pp.status != _Prunning {
6623 continue
6624 }
6625 if preemptone(pp) {
6626 res = true
6627 }
6628 }
6629 return res
6630 }
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641
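// preemptone requests preemption of the goroutine currently running on pp by
// setting its preempt flag and poisoning its stack guard, and by sending an
// async preemption signal to the M where supported. The request is best
// effort and may be observed late or not at all.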
6642 func preemptone(pp *p) bool {
6643 mp := pp.m.ptr()
6644 if mp == nil || mp == getg().m {
6645 return false
6646 }
6647 gp := mp.curg
6648 if gp == nil || gp == mp.g0 {
6649 return false
6650 }
6651 if readgstatus(gp)&^_Gscan == _Gsyscall {
6652
6653 return false
6654 }
6655
6656 gp.preempt = true
6657
6658
6659
6660
6661
6662 gp.stackguard0 = stackPreempt
6663
6664
6665 if preemptMSupported && debug.asyncpreemptoff == 0 {
6666 pp.preempt = true
6667 preemptM(mp)
6668 }
6669
6670 return true
6671 }
6672
6673 var starttime int64
6674
6675 func schedtrace(detailed bool) {
6676 now := nanotime()
6677 if starttime == 0 {
6678 starttime = now
6679 }
6680
6681 lock(&sched.lock)
6682 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6683 if detailed {
6684 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6685 }
6686
6687
6688
6689 for i, pp := range allp {
6690 h := atomic.Load(&pp.runqhead)
6691 t := atomic.Load(&pp.runqtail)
6692 if detailed {
6693 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6694 mp := pp.m.ptr()
6695 if mp != nil {
6696 print(mp.id)
6697 } else {
6698 print("nil")
6699 }
6700 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6701 } else {
6702
6703
6704 print(" ")
6705 if i == 0 {
6706 print("[ ")
6707 }
6708 print(t - h)
6709 if i == len(allp)-1 {
6710 print(" ]")
6711 }
6712 }
6713 }
6714
6715 if !detailed {
6716
6717 print(" schedticks=[ ")
6718 for _, pp := range allp {
6719 print(pp.schedtick)
6720 print(" ")
6721 }
6722 print("]\n")
6723 }
6724
6725 if !detailed {
6726 unlock(&sched.lock)
6727 return
6728 }
6729
6730 for mp := allm; mp != nil; mp = mp.alllink {
6731 pp := mp.p.ptr()
6732 print(" M", mp.id, ": p=")
6733 if pp != nil {
6734 print(pp.id)
6735 } else {
6736 print("nil")
6737 }
6738 print(" curg=")
6739 if mp.curg != nil {
6740 print(mp.curg.goid)
6741 } else {
6742 print("nil")
6743 }
6744 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6745 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6746 print(lockedg.goid)
6747 } else {
6748 print("nil")
6749 }
6750 print("\n")
6751 }
6752
6753 forEachG(func(gp *g) {
6754 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6755 if gp.m != nil {
6756 print(gp.m.id)
6757 } else {
6758 print("nil")
6759 }
6760 print(" lockedm=")
6761 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6762 print(lockedm.id)
6763 } else {
6764 print("nil")
6765 }
6766 print("\n")
6767 })
6768 unlock(&sched.lock)
6769 }
6770
6771 type updateMaxProcsGState struct {
6772 lock mutex
6773 g *g
6774 idle atomic.Bool
6775
6776
6777 procs int32
6778 }
6779
6780 var (
6781 // updatemaxprocs is the GODEBUG non-default-behavior counter,
6782 // incremented when updatemaxprocs=0 disables automatic updates.
6783 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6784
6785 // updateMaxProcsG is the state of the goroutine that applies
6786 // automatic GOMAXPROCS updates requested by sysmon.
6787 updateMaxProcsG updateMaxProcsGState
6788
6789
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
6800
6801
6802
6803
6804
6805
6806
6807
6808
6809
6810
6811
6812
6813
6814
6815
6816
6817
6818
6819
6820
6821
6822
6823
6824
6825
6826
6827
6828
6829
6830
6831
6832
6833
6834
6835 // computeMaxProcsLock serializes recomputation of the default GOMAXPROCS value.
6836 computeMaxProcsLock mutex
6837 )
6838
6839 // defaultGOMAXPROCSUpdateEnable starts the background goroutine that
6840 // keeps GOMAXPROCS up to date with the default value, unless automatic
6841 // updates are disabled via the updatemaxprocs GODEBUG setting.
6842 func defaultGOMAXPROCSUpdateEnable() {
6843 if debug.updatemaxprocs == 0 {
6844
6845
6846
6847
6848
6849
6850
6851
6852
6853
6854
6855 updatemaxprocs.IncNonDefault()
6856 return
6857 }
6858
6859 go updateMaxProcsGoroutine()
6860 }
6861
6862 func updateMaxProcsGoroutine() {
6863 updateMaxProcsG.g = getg()
6864 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
6865 for {
6866 lock(&updateMaxProcsG.lock)
6867 if updateMaxProcsG.idle.Load() {
6868 throw("updateMaxProcsGoroutine: phase error")
6869 }
6870 updateMaxProcsG.idle.Store(true)
6871 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
6872 // Woken by sysmonUpdateGOMAXPROCS with a new target value in
6873 // updateMaxProcsG.procs. Apply it with the world stopped.
6874 stw := stopTheWorldGC(stwGOMAXPROCS)
6875 // Re-check under sched.lock: if the user called GOMAXPROCS in the
6876 // meantime, keep their value and stop performing automatic updates.
6877 lock(&sched.lock)
6878 custom := sched.customGOMAXPROCS
6879 unlock(&sched.lock)
6880 if custom {
6881 startTheWorldGC(stw)
6882 return
6883 }
6884 // newprocs is consumed by startTheWorldGC, which resizes the set of
6885 // Ps to the new value when the world restarts.
6886 //
6887 // This is an automatic update, so sched.customGOMAXPROCS stays false
6888 // and future updates remain enabled.
6889 newprocs = updateMaxProcsG.procs
6890 lock(&sched.lock)
6891 sched.customGOMAXPROCS = false
6892 unlock(&sched.lock)
6893
6894 startTheWorldGC(stw)
6895 }
6896 }
6897
6898 func sysmonUpdateGOMAXPROCS() {
6899 // Serialize recomputation of the default value, and keep it outside sched.lock since it may be slow.
6900 lock(&computeMaxProcsLock)
6901 // If the user has set GOMAXPROCS explicitly, automatic updates are
6902 // disabled; nothing to do.
6903 lock(&sched.lock)
6904 custom := sched.customGOMAXPROCS
6905 curr := gomaxprocs
6906 unlock(&sched.lock)
6907 if custom {
6908 unlock(&computeMaxProcsLock)
6909 return
6910 }
6911
6912 // Recompute what the default GOMAXPROCS value would be right now.
6913 procs := defaultGOMAXPROCS(0)
6914 unlock(&computeMaxProcsLock)
6915 if procs == curr {
6916 // No change; nothing to do.
6917 return
6918 }
6919
6920 // Wake the update goroutine to apply the new value. If it is not
6921 // idle, an update is already in flight and sysmon will try again on
6922 // a later pass.
6923 if updateMaxProcsG.idle.Load() {
6924 lock(&updateMaxProcsG.lock)
6925 updateMaxProcsG.procs = procs
6926 updateMaxProcsG.idle.Store(false)
6927 var list gList
6928 list.push(updateMaxProcsG.g)
6929 injectglist(&list)
6930 unlock(&updateMaxProcsG.lock)
6931 }
6932 }
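// Automatic updates defer to an explicit setting: GODEBUG=updatemaxprocs=0
// prevents the update goroutine from being started (see
// defaultGOMAXPROCSUpdateEnable above), and a call to runtime.GOMAXPROCS with
// a positive value is treated as a custom setting (sched.customGOMAXPROCS),
// which both paths above check. A minimal sketch of pinning the value from
// user code, assuming the startup default is acceptable as a fixed value:
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		// Freeze GOMAXPROCS at whatever the runtime chose at startup;
//		// automatic (e.g. container-driven) updates stop after this call.
//		runtime.GOMAXPROCS(runtime.GOMAXPROCS(0))
//	}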
6933
6934 // schedEnableUser enables or disables the scheduling of user
6935 // goroutines.
6936 //
6937 // This does not stop already running user goroutines, so the caller
6938 // should first stop the world when disabling user goroutines.
6939 func schedEnableUser(enable bool) {
6940 lock(&sched.lock)
6941 if sched.disable.user == !enable {
6942 unlock(&sched.lock)
6943 return
6944 }
6945 sched.disable.user = !enable
6946 if enable {
6947 n := sched.disable.runnable.size
6948 globrunqputbatch(&sched.disable.runnable)
6949 unlock(&sched.lock)
6950 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6951 startm(nil, false, false)
6952 }
6953 } else {
6954 unlock(&sched.lock)
6955 }
6956 }
6957
6958 // schedEnabled reports whether gp should be scheduled.
6959 // It returns false if scheduling of gp is disabled.
6960 //
6961 // sched.lock must be held.
6962 func schedEnabled(gp *g) bool {
6963 assertLockHeld(&sched.lock)
6964
6965 if sched.disable.user {
6966 return isSystemGoroutine(gp, true)
6967 }
6968 return true
6969 }
6970
6971 // Put mp on the midle list.
6972 //
6973 // sched.lock must be held.
6974 //
6975 // May run during STW, so write barriers are not allowed.
6976 func mput(mp *m) {
6977 assertLockHeld(&sched.lock)
6978
6979 mp.schedlink = sched.midle
6980 sched.midle.set(mp)
6981 sched.nmidle++
6982 checkdead()
6983 }
6984
6985 // Try to get an m from the midle list.
6986 //
6987 // sched.lock must be held.
6988 //
6989 // May run during STW, so write barriers are not allowed.
6990 func mget() *m {
6991 assertLockHeld(&sched.lock)
6992
6993 mp := sched.midle.ptr()
6994 if mp != nil {
6995 sched.midle = mp.schedlink
6996 sched.nmidle--
6997 }
6998 return mp
6999 }
7000
7001 // Put gp on the global runnable queue.
7002 //
7003 // sched.lock must be held.
7004 //
7005 // May run during STW, so write barriers are not allowed.
7006 func globrunqput(gp *g) {
7007 assertLockHeld(&sched.lock)
7008
7009 sched.runq.pushBack(gp)
7010 }
7011
7012 // Put gp at the head of the global runnable queue.
7013 //
7014 // sched.lock must be held.
7015 //
7016 // May run during STW, so write barriers are not allowed.
7017 func globrunqputhead(gp *g) {
7018 assertLockHeld(&sched.lock)
7019
7020 sched.runq.push(gp)
7021 }
7022
7023 // Put a batch of runnable goroutines on the global runnable queue.
7024 // This clears *batch.
7025 //
7026 // sched.lock must be held.
7027 //
7028 // May run during STW, so write barriers are not allowed.
7029 func globrunqputbatch(batch *gQueue) {
7030 assertLockHeld(&sched.lock)
7031
7032 sched.runq.pushBackAll(*batch)
7033 *batch = gQueue{}
7034 }
7035
7036 // Try to get a G from the global runnable queue.
7037 // sched.lock must be held.
7038 func globrunqget() *g {
7039 assertLockHeld(&sched.lock)
7040
7041 if sched.runq.size == 0 {
7042 return nil
7043 }
7044
7045 return sched.runq.pop()
7046 }
7047
7048 // Try to get a batch of up to n Gs from the global runnable queue.
7049 // sched.lock must be held.
7050 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7051 assertLockHeld(&sched.lock)
7052
7053 if sched.runq.size == 0 {
7054 return
7055 }
7056
7057 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7058
7059 gp = sched.runq.pop()
7060 n--
7061
7062 for ; n > 0; n-- {
7063 gp1 := sched.runq.pop()
7064 q.pushBack(gp1)
7065 }
7066 return
7067 }
7068 // pMask is an atomic bitstring with one bit per P, used to publish
7069 // per-P state such as idleness (idlepMask) and pending timers (timerpMask).
7070 type pMask []uint32
7071
7072
7073 func (p pMask) read(id uint32) bool {
7074 word := id / 32
7075 mask := uint32(1) << (id % 32)
7076 return (atomic.Load(&p[word]) & mask) != 0
7077 }
7078
7079
7080 func (p pMask) set(id int32) {
7081 word := id / 32
7082 mask := uint32(1) << (id % 32)
7083 atomic.Or(&p[word], mask)
7084 }
7085
7086
7087 func (p pMask) clear(id int32) {
7088 word := id / 32
7089 mask := uint32(1) << (id % 32)
7090 atomic.And(&p[word], ^mask)
7091 }
7092
7093
7094 func (p pMask) any() bool {
7095 for i := range p {
7096 if atomic.Load(&p[i]) != 0 {
7097 return true
7098 }
7099 }
7100 return false
7101 }
7102
7103 // resize returns a pMask large enough to hold nprocs bits, reusing
7104 // the existing backing store when possible and preserving existing
7105 // bits.
7106 //
7107 // The returned mask may alias p.
7107 func (p pMask) resize(nprocs int32) pMask {
7108 maskWords := (nprocs + 31) / 32
7109
7110 if maskWords <= int32(cap(p)) {
7111 return p[:maskWords]
7112 }
7113 newMask := make([]uint32, maskWords)
7114
7115 copy(newMask, p)
7116 return newMask
7117 }
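// The mask is manipulated with the runtime's internal atomics. The same
// one-word-per-32-IDs scheme can be sketched outside the runtime with the
// standard library (illustrative only; idMask and the Or/And helpers from
// recent sync/atomic releases are assumptions of the sketch, not runtime API):
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	type idMask []uint32
//
//	func (m idMask) set(id int32)   { atomic.OrUint32(&m[id/32], 1<<(uint32(id)%32)) }
//	func (m idMask) clear(id int32) { atomic.AndUint32(&m[id/32], ^(uint32(1) << (uint32(id) % 32))) }
//	func (m idMask) read(id int32) bool {
//		return atomic.LoadUint32(&m[id/32])&(1<<(uint32(id)%32)) != 0
//	}
//
//	func main() {
//		m := make(idMask, 2) // room for 64 ids
//		m.set(37)
//		fmt.Println(m.read(37), m.read(38)) // true false
//	}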
7118
7119 // pidleput puts p on the _Pidle list.
7120 //
7121 // now must be a relatively recent call to nanotime, or zero. Returns
7122 // now or the current time if now was zero.
7123 //
7124 // This releases ownership of p. Once sched.lock is released it is no
7125 // longer safe to use p.
7126 //
7127 // sched.lock must be held.
7128 //
7129 // May run during STW, so write barriers are not allowed.
7130 func pidleput(pp *p, now int64) int64 {
7131 assertLockHeld(&sched.lock)
7132
7133 if !runqempty(pp) {
7134 throw("pidleput: P has non-empty run queue")
7135 }
7136 if now == 0 {
7137 now = nanotime()
7138 }
7139 if pp.timers.len.Load() == 0 {
7140 timerpMask.clear(pp.id)
7141 }
7142 idlepMask.set(pp.id)
7143 pp.link = sched.pidle
7144 sched.pidle.set(pp)
7145 sched.npidle.Add(1)
7146 if !pp.limiterEvent.start(limiterEventIdle, now) {
7147 throw("must be able to track idle limiter event")
7148 }
7149 return now
7150 }
7151
7152 // pidleget tries to get a p from the _Pidle list, acquiring ownership
7153 // of p along the way.
7154 //
7155 // now must be a relatively recent call to nanotime, or zero.
7156 //
7157 // sched.lock must be held. May run during STW, so write barriers are
7158 // not allowed.
7159 func pidleget(now int64) (*p, int64) {
7160 assertLockHeld(&sched.lock)
7161
7162 pp := sched.pidle.ptr()
7163 if pp != nil {
7164
7165 if now == 0 {
7166 now = nanotime()
7167 }
7168 timerpMask.set(pp.id)
7169 idlepMask.clear(pp.id)
7170 sched.pidle = pp.link
7171 sched.npidle.Add(-1)
7172 pp.limiterEvent.stop(limiterEventIdle, now)
7173 }
7174 return pp, now
7175 }
7176
7177 // pidlegetSpinning tries to get a p from the _Pidle list, acquiring
7178 // ownership of p along the way.
7179 //
7180 // The caller intends to transition to spinning. If no idle P is
7181 // available, pidlegetSpinning records that fact in sched.needspinning
7182 // so that whoever releases a P next knows to start a spinning M.
7183 //
7184 // now must be a relatively recent call to nanotime, or zero.
7185 //
7186 // sched.lock must be held.
7187 func pidlegetSpinning(now int64) (*p, int64) {
7188 assertLockHeld(&sched.lock)
7189
7190 pp, now := pidleget(now)
7191 if pp == nil {
7192 // No P available. Leave a note in sched.needspinning so that a P
7193 // released in the meantime starts a new spinning M instead of the
7194 // pending work going unnoticed.
7195 sched.needspinning.Store(1)
7196 return nil, now
7197 }
7198
7199 return pp, now
7200 }
7201
7202 // runqempty reports whether pp has no Gs on its local run queue.
7203 // It never returns true spuriously.
7204 func runqempty(pp *p) bool {
7205 // Defend against a race where 1) pp has G1 in runnext but runqhead == runqtail,
7206 // 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runnext.
7207 // Simply observing that runqhead == runqtail and then observing that runnext == nil
7208 // does not mean the queue is empty.
7209 for {
7210 head := atomic.Load(&pp.runqhead)
7211 tail := atomic.Load(&pp.runqtail)
7212 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7213 if tail == atomic.Load(&pp.runqtail) {
7214 return head == tail && runnext == 0
7215 }
7216 }
7217 }
7218
7219 // To shake out latent assumptions about scheduling order,
7220 // we introduce some randomness into scheduling decisions
7221 // when running with the race detector.
7222 // The need for this was made obvious by changing the
7223 // (deterministic) scheduling order in Go 1.5 and breaking
7224 // many poorly-written tests.
7225 // With the randomness here, as long as the tests pass
7226 // consistently with -race, they shouldn't have latent scheduling
7227 // assumptions.
7228 const randomizeScheduler = raceenabled
7229
7230 // runqput tries to put g on the local runnable queue.
7231 // If next is false, runqput adds g to the tail of the runnable queue.
7232 // If next is true, runqput puts g in the pp.runnext slot.
7233 // If the run queue is full, runqput puts g on the global queue.
7234 // Executed only by the owner P.
7235 func runqput(pp *p, gp *g, next bool) {
7236 if !haveSysmon && next {
7237 // A runnext goroutine shares the same time slice as the
7238 // current goroutine (inheritTime from runqget). To prevent a
7239 // ping-pong pair of goroutines from starving all others, we
7240 // depend on sysmon to preempt "long-running goroutines". That
7241 // is, any set of goroutines sharing the same time slice.
7242 //
7243 // If there is no sysmon, we must avoid runnext entirely or
7244 // risk starvation.
7245 next = false
7246 }
7247 if randomizeScheduler && next && randn(2) == 0 {
7248 next = false
7249 }
7250
7251 if next {
7252 retryNext:
7253 oldnext := pp.runnext
7254 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7255 goto retryNext
7256 }
7257 if oldnext == 0 {
7258 return
7259 }
7260
7261 gp = oldnext.ptr()
7262 }
7263
7264 retry:
7265 h := atomic.LoadAcq(&pp.runqhead)
7266 t := pp.runqtail
7267 if t-h < uint32(len(pp.runq)) {
7268 pp.runq[t%uint32(len(pp.runq))].set(gp)
7269 atomic.StoreRel(&pp.runqtail, t+1)
7270 return
7271 }
7272 if runqputslow(pp, gp, h, t) {
7273 return
7274 }
7275
7276 goto retry
7277 }
7278
7279 // Put g and a batch of work from the local runnable queue on the global queue.
7280 // Executed only by the owner P.
7281 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7282 var batch [len(pp.runq)/2 + 1]*g
7283
7284
7285 n := t - h
7286 n = n / 2
7287 if n != uint32(len(pp.runq)/2) {
7288 throw("runqputslow: queue is not full")
7289 }
7290 for i := uint32(0); i < n; i++ {
7291 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7292 }
7293 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7294 return false
7295 }
7296 batch[n] = gp
7297
7298 if randomizeScheduler {
7299 for i := uint32(1); i <= n; i++ {
7300 j := cheaprandn(i + 1)
7301 batch[i], batch[j] = batch[j], batch[i]
7302 }
7303 }
7304
7305
7306 for i := uint32(0); i < n; i++ {
7307 batch[i].schedlink.set(batch[i+1])
7308 }
7309
7310 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7311
7312
7313 lock(&sched.lock)
7314 globrunqputbatch(&q)
7315 unlock(&sched.lock)
7316 return true
7317 }
7318
7319 // runqputbatch tries to put all the Gs on q on the local runnable queue.
7320 // Any Gs that do not fit remain on q.
7321 // Executed only by the owner P.
7322 func runqputbatch(pp *p, q *gQueue) {
7323 if q.empty() {
7324 return
7325 }
7326 h := atomic.LoadAcq(&pp.runqhead)
7327 t := pp.runqtail
7328 n := uint32(0)
7329 for !q.empty() && t-h < uint32(len(pp.runq)) {
7330 gp := q.pop()
7331 pp.runq[t%uint32(len(pp.runq))].set(gp)
7332 t++
7333 n++
7334 }
7335
7336 if randomizeScheduler {
7337 off := func(o uint32) uint32 {
7338 return (pp.runqtail + o) % uint32(len(pp.runq))
7339 }
7340 for i := uint32(1); i < n; i++ {
7341 j := cheaprandn(i + 1)
7342 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7343 }
7344 }
7345
7346 atomic.StoreRel(&pp.runqtail, t)
7347
7348 return
7349 }
7350
7351 // Get g from the local runnable queue.
7352 // If inheritTime is true, gp should inherit the remaining time in the
7353 // current time slice. Otherwise, it should start a new time slice.
7354 // Executed only by the owner P.
7355 func runqget(pp *p) (gp *g, inheritTime bool) {
7356
7357 next := pp.runnext
7358 // If the runnext CAS fails, the slot can only have been stolen by another P,
7359 // because other Ps can race to set runnext to 0, while only the owner P can
7360 // set it to a non-zero value. Hence there is no need to retry the CAS.
7361 if next != 0 && pp.runnext.cas(next, 0) {
7362 return next.ptr(), true
7363 }
7364
7365 for {
7366 h := atomic.LoadAcq(&pp.runqhead)
7367 t := pp.runqtail
7368 if t == h {
7369 return nil, false
7370 }
7371 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7372 if atomic.CasRel(&pp.runqhead, h, h+1) {
7373 return gp, false
7374 }
7375 }
7376 }
7377
7378 // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
7379 // Executed only by the owner P.
7380 func runqdrain(pp *p) (drainQ gQueue) {
7381 oldNext := pp.runnext
7382 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7383 drainQ.pushBack(oldNext.ptr())
7384 }
7385
7386 retry:
7387 h := atomic.LoadAcq(&pp.runqhead)
7388 t := pp.runqtail
7389 qn := t - h
7390 if qn == 0 {
7391 return
7392 }
7393 if qn > uint32(len(pp.runq)) {
7394 goto retry
7395 }
7396
7397 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7398 goto retry
7399 }
7400
7401
7402
7403 // Advance the head pointer before copying the Gs out so that the
7404 // steal of the whole batch is committed first: once the CasRel above
7405 // succeeds, concurrent runqsteal calls on other Ps can no longer
7406 // observe or take these Gs, and it is safe to update gp.schedlink
7407 // while pushing them onto drainQ without racing with a thief.
7408 for i := uint32(0); i < qn; i++ {
7409 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7410 drainQ.pushBack(gp)
7411 }
7412 return
7413 }
7414
7415 // Grabs a batch of goroutines from pp's runnable queue into batch.
7416 // Batch is a ring buffer starting at batchHead.
7417 // Returns number of grabbed goroutines.
7418 // Can be executed by any P.
7419 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7420 for {
7421 h := atomic.LoadAcq(&pp.runqhead)
7422 t := atomic.LoadAcq(&pp.runqtail)
7423 n := t - h
7424 n = n - n/2
7425 if n == 0 {
7426 if stealRunNextG {
7427
7428 if next := pp.runnext; next != 0 {
7429 if pp.status == _Prunning {
7430 // Sleep to ensure that pp isn't about to run the g
7431 // we are about to steal.
7432 // The important use case here is when the g running
7433 // on pp ready()s another g and then almost
7434 // immediately blocks. Instead of stealing runnext
7435 // in this window, back off to give pp a chance to
7436 // schedule runnext. This will avoid thrashing gs
7437 // between different Ps.
7438 // A sync chan send/recv takes ~50ns as of time of
7439 // writing, so 3us gives ~50x overshoot.
7440 if !osHasLowResTimer {
7441 usleep(3)
7442 } else {
7443 // On some platforms the system timer granularity is
7444 // 1-15ms, which is way too much for this
7445 // optimization. So just yield.
7446 osyield()
7447 }
7448 }
7449 if !pp.runnext.cas(next, 0) {
7450 continue
7451 }
7452 batch[batchHead%uint32(len(batch))] = next
7453 return 1
7454 }
7455 }
7456 return 0
7457 }
7458 if n > uint32(len(pp.runq)/2) {
7459 continue
7460 }
7461 for i := uint32(0); i < n; i++ {
7462 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7463 batch[(batchHead+i)%uint32(len(batch))] = g
7464 }
7465 if atomic.CasRel(&pp.runqhead, h, h+n) {
7466 return n
7467 }
7468 }
7469 }
7470
7471 // Steal half of the elements from the local runnable queue of p2
7472 // and put them onto the local runnable queue of pp.
7473 // Returns one of the stolen elements (or nil if it failed).
7474 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7475 t := pp.runqtail
7476 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7477 if n == 0 {
7478 return nil
7479 }
7480 n--
7481 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7482 if n == 0 {
7483 return gp
7484 }
7485 h := atomic.LoadAcq(&pp.runqhead)
7486 if t-h+n >= uint32(len(pp.runq)) {
7487 throw("runqsteal: runq overflow")
7488 }
7489 atomic.StoreRel(&pp.runqtail, t+n)
7490 return gp
7491 }
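// runqput, runqget, and runqgrab together implement a fixed-size
// single-producer/multi-consumer ring: only the owner P advances runqtail,
// while any P may advance runqhead with a CAS to consume or steal. A
// standalone sketch of that discipline (illustrative only, not the runtime's
// implementation; it omits runnext and spilling to the global queue):
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	type ring struct {
//		head atomic.Uint32   // advanced by any consumer via CAS
//		tail atomic.Uint32   // advanced only by the owner
//		buf  [4]atomic.Int64 // tiny for the example; the runtime uses 256 slots
//	}
//
//	// push is called only by the owning goroutine.
//	func (r *ring) push(v int64) bool {
//		h, t := r.head.Load(), r.tail.Load()
//		if t-h >= uint32(len(r.buf)) {
//			return false // full; runqput would spill half to the global queue
//		}
//		r.buf[t%uint32(len(r.buf))].Store(v)
//		r.tail.Store(t + 1)
//		return true
//	}
//
//	// steal may be called from any goroutine; it claims one slot via CAS.
//	func (r *ring) steal() (int64, bool) {
//		for {
//			h, t := r.head.Load(), r.tail.Load()
//			if h == t {
//				return 0, false
//			}
//			v := r.buf[h%uint32(len(r.buf))].Load()
//			if r.head.CompareAndSwap(h, h+1) {
//				return v, true // v is only used if we won the slot
//			}
//		}
//	}
//
//	func main() {
//		var r ring
//		r.push(1)
//		r.push(2)
//		fmt.Println(r.steal()) // 1 true
//	}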
7492
7493 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only
7494 // be on one gQueue or gList at a time.
7495 type gQueue struct {
7496 head guintptr
7497 tail guintptr
7498 size int32
7499 }
7500
7501
7502 func (q *gQueue) empty() bool {
7503 return q.head == 0
7504 }
7505
7506
7507 func (q *gQueue) push(gp *g) {
7508 gp.schedlink = q.head
7509 q.head.set(gp)
7510 if q.tail == 0 {
7511 q.tail.set(gp)
7512 }
7513 q.size++
7514 }
7515
7516
7517 func (q *gQueue) pushBack(gp *g) {
7518 gp.schedlink = 0
7519 if q.tail != 0 {
7520 q.tail.ptr().schedlink.set(gp)
7521 } else {
7522 q.head.set(gp)
7523 }
7524 q.tail.set(gp)
7525 q.size++
7526 }
7527
7528 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
7529 // not be used.
7530 func (q *gQueue) pushBackAll(q2 gQueue) {
7531 if q2.tail == 0 {
7532 return
7533 }
7534 q2.tail.ptr().schedlink = 0
7535 if q.tail != 0 {
7536 q.tail.ptr().schedlink = q2.head
7537 } else {
7538 q.head = q2.head
7539 }
7540 q.tail = q2.tail
7541 q.size += q2.size
7542 }
7543
7544
7545
7546 func (q *gQueue) pop() *g {
7547 gp := q.head.ptr()
7548 if gp != nil {
7549 q.head = gp.schedlink
7550 if q.head == 0 {
7551 q.tail = 0
7552 }
7553 q.size--
7554 }
7555 return gp
7556 }
7557
7558
7559 func (q *gQueue) popList() gList {
7560 stack := gList{q.head, q.size}
7561 *q = gQueue{}
7562 return stack
7563 }
7564
7565 // A gList is a list of Gs linked through g.schedlink. A G can only be
7566 // on one gQueue or gList at a time.
7567 type gList struct {
7568 head guintptr
7569 size int32
7570 }
7571
7572
7573 func (l *gList) empty() bool {
7574 return l.head == 0
7575 }
7576
7577
7578 func (l *gList) push(gp *g) {
7579 gp.schedlink = l.head
7580 l.head.set(gp)
7581 l.size++
7582 }
7583
7584
7585 func (l *gList) pushAll(q gQueue) {
7586 if !q.empty() {
7587 q.tail.ptr().schedlink = l.head
7588 l.head = q.head
7589 l.size += q.size
7590 }
7591 }
7592
7593
7594 func (l *gList) pop() *g {
7595 gp := l.head.ptr()
7596 if gp != nil {
7597 l.head = gp.schedlink
7598 l.size--
7599 }
7600 return gp
7601 }
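// gQueue and gList are intrusive containers: the link field (schedlink) lives
// inside the g itself, so queue operations never allocate. A minimal sketch of
// the same pattern for an arbitrary node type (illustrative only, not runtime
// code):
//
//	package main
//
//	import "fmt"
//
//	type node struct {
//		val  int
//		next *node // intrusive link, like g.schedlink
//	}
//
//	type queue struct {
//		head, tail *node
//		size       int
//	}
//
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//		q.size++
//	}
//
//	func (q *queue) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//			q.size--
//		}
//		return n
//	}
//
//	func main() {
//		var q queue
//		q.pushBack(&node{val: 1})
//		q.pushBack(&node{val: 2})
//		fmt.Println(q.pop().val, q.pop().val, q.size) // 1 2 0
//	}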
7602
7603 // setMaxThreads implements runtime/debug.SetMaxThreads.
7604 func setMaxThreads(in int) (out int) {
7605 lock(&sched.lock)
7606 out = int(sched.maxmcount)
7607 if in > 0x7fffffff {
7608 sched.maxmcount = 0x7fffffff
7609 } else {
7610 sched.maxmcount = int32(in)
7611 }
7612 checkmcount()
7613 unlock(&sched.lock)
7614 return
7615 }
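// setMaxThreads backs runtime/debug.SetMaxThreads, the public way to change
// the OS-thread limit. Usage sketch (the documented default limit is 10000
// threads; 20000 below is just an example value):
//
//	package main
//
//	import "runtime/debug"
//
//	func main() {
//		// Raise the thread limit before starting work that may block many
//		// threads in syscalls or cgo calls; the previous limit is returned.
//		prev := debug.SetMaxThreads(20000)
//		_ = prev
//	}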
7616
7617
7618
7619
7620
7621
7622
7623
7624
7625
7626
7627 // procPin pins the current goroutine to its P by disabling preemption
7628 // (m.locks++) and returns the P's id. It must be paired with procUnpin.
7629 func procPin() int {
7630 gp := getg()
7631 mp := gp.m
7632
7633 mp.locks++
7634 return int(mp.p.ptr().id)
7635 }
7636
7637
7638
7639
7640
7641
7642
7643
7644
7645
7646
7647 // procUnpin undoes a prior procPin, re-enabling preemption of the
7648 // current goroutine.
7649 func procUnpin() {
7650 gp := getg()
7651 gp.m.locks--
7652 }
7653
7654
7655
7656 func sync_runtime_procPin() int {
7657 return procPin()
7658 }
7659
7660
7661
7662 func sync_runtime_procUnpin() {
7663 procUnpin()
7664 }
7665
7666
7667
7668 func sync_atomic_runtime_procPin() int {
7669 return procPin()
7670 }
7671
7672
7673
7674 func sync_atomic_runtime_procUnpin() {
7675 procUnpin()
7676 }
7677
7678 // Active spinning support for sync.Mutex and friends.
7679 //
7680 // internal_sync_runtime_canSpin reports whether spinning is likely to
7681 // be profitable for a lock that was just observed to be held.
7682 func internal_sync_runtime_canSpin(i int) bool {
7683 // sync.Mutex is cooperative, so we are conservative with spinning.
7684 // Spin only few times and only if running on a multicore machine and
7685 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
7686 // As opposed to runtime mutex we don't do passive spinning here,
7687 // since there can be work on global runq or on other Ps.
7688 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7689 return false
7690 }
7691 if p := getg().m.p.ptr(); !runqempty(p) {
7692 return false
7693 }
7694 return true
7695 }
7696
7697
7698
7699 func internal_sync_runtime_doSpin() {
7700 procyield(active_spin_cnt)
7701 }
7702
7703
7704
7705
7706
7707
7708
7709
7710
7711
7712
7713
7714
7715
7716
7717 func sync_runtime_canSpin(i int) bool {
7718 return internal_sync_runtime_canSpin(i)
7719 }
7720
7721
7722
7723
7724
7725
7726
7727
7728
7729
7730
7731
7732
7733 func sync_runtime_doSpin() {
7734 internal_sync_runtime_doSpin()
7735 }
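// canSpin/doSpin back the spin-then-block heuristic used by sync.Mutex's lock
// slow path: spin a handful of times while there is a realistic chance the
// holder releases soon, then fall back to blocking. A self-contained sketch of
// the same shape using only public APIs (illustrative; the real mutex parks on
// a runtime semaphore rather than a channel, and spins with procyield):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"sync/atomic"
//	)
//
//	type spinLock struct {
//		state atomic.Int32
//		sema  chan struct{} // stand-in for the runtime semaphore
//	}
//
//	func newSpinLock() *spinLock { return &spinLock{sema: make(chan struct{}, 1)} }
//
//	func (l *spinLock) Lock() {
//		for i := 0; ; i++ {
//			if l.state.CompareAndSwap(0, 1) {
//				return // acquired
//			}
//			if i < 4 && runtime.NumCPU() > 1 {
//				runtime.Gosched() // cheap stand-in for doSpin/procyield
//				continue
//			}
//			<-l.sema // block until an unlock hands us a wakeup token
//			i = 0
//		}
//	}
//
//	func (l *spinLock) Unlock() {
//		l.state.Store(0)
//		select {
//		case l.sema <- struct{}{}: // wake at most one waiter
//		default:
//		}
//	}
//
//	func main() {
//		l := newSpinLock()
//		l.Lock()
//		l.Unlock()
//		fmt.Println("ok")
//	}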
7736
7737 var stealOrder randomOrder
7738
7739 // randomOrder/randomEnum are helper types for randomized work stealing.
7740 // They allow to enumerate all Ps in different pseudo-random orders without
7741 // repetitions: stepping through the P indices with a stride that is
7742 // coprime to the number of Ps visits each P exactly once.
7743 type randomOrder struct {
7744 count uint32
7745 coprimes []uint32
7746 }
7747
7748 type randomEnum struct {
7749 i uint32
7750 count uint32
7751 pos uint32
7752 inc uint32
7753 }
7754
7755 func (ord *randomOrder) reset(count uint32) {
7756 ord.count = count
7757 ord.coprimes = ord.coprimes[:0]
7758 for i := uint32(1); i <= count; i++ {
7759 if gcd(i, count) == 1 {
7760 ord.coprimes = append(ord.coprimes, i)
7761 }
7762 }
7763 }
7764
7765 func (ord *randomOrder) start(i uint32) randomEnum {
7766 return randomEnum{
7767 count: ord.count,
7768 pos: i % ord.count,
7769 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7770 }
7771 }
7772
7773 func (enum *randomEnum) done() bool {
7774 return enum.i == enum.count
7775 }
7776
7777 func (enum *randomEnum) next() {
7778 enum.i++
7779 enum.pos = (enum.pos + enum.inc) % enum.count
7780 }
7781
7782 func (enum *randomEnum) position() uint32 {
7783 return enum.pos
7784 }
7785
7786 func gcd(a, b uint32) uint32 {
7787 for b != 0 {
7788 a, b = b, a%b
7789 }
7790 return a
7791 }
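// The enumeration trick: starting at any position and repeatedly adding an
// increment that is coprime to count (mod count) visits every value in
// [0, count) exactly once. A standalone sketch (illustrative only):
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		const count = 6
//		pos, inc := uint32(2), uint32(5) // gcd(5, 6) == 1
//		for i := 0; i < count; i++ {
//			fmt.Print(pos, " ") // each of 0..5 exactly once: 2 1 0 5 4 3
//			pos = (pos + inc) % count
//		}
//		fmt.Println()
//	}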
7792
7793 // An initTask represents the set of initializations that need to be done
7794 // for a package. The nfns init function pointers follow the struct in memory.
7795 type initTask struct {
7796 state uint32
7797 nfns uint32
7798
7799 }
7800
7801 // inittrace stores statistics for init functions; the counts are updated
7802 // by malloc and newproc while active is set during package initialization.
7803 var inittrace tracestat
7804
7805 type tracestat struct {
7806 active bool
7807 id uint64
7808 allocs uint64
7809 bytes uint64
7810 }
7811
7812 func doInit(ts []*initTask) {
7813 for _, t := range ts {
7814 doInit1(t)
7815 }
7816 }
7817
7818 func doInit1(t *initTask) {
7819 switch t.state {
7820 case 2:
7821 return
7822 case 1:
7823 throw("recursive call during initialization - linker skew")
7824 default:
7825 t.state = 1
7826
7827 var (
7828 start int64
7829 before tracestat
7830 )
7831
7832 if inittrace.active {
7833 start = nanotime()
7834
7835 before = inittrace
7836 }
7837
7838 if t.nfns == 0 {
7839
7840 throw("inittask with no functions")
7841 }
7842
7843 firstFunc := add(unsafe.Pointer(t), 8)
7844 for i := uint32(0); i < t.nfns; i++ {
7845 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7846 f := *(*func())(unsafe.Pointer(&p))
7847 f()
7848 }
7849
7850 if inittrace.active {
7851 end := nanotime()
7852
7853 after := inittrace
7854
7855 f := *(*func())(unsafe.Pointer(&firstFunc))
7856 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7857
7858 var sbuf [24]byte
7859 print("init ", pkg, " @")
7860 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7861 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7862 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7863 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7864 print("\n")
7865 }
7866
7867 t.state = 2
7868 }
7869 }
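// The trace output produced above is enabled with GODEBUG=inittrace=1, which
// prints one line per package init with its start offset, wall-clock duration,
// and allocation counts. Usage sketch (myprog is a placeholder binary name):
//
//	GODEBUG=inittrace=1 ./myprog
//
// Each line has the form "init <pkg> @<t> ms, <d> ms clock, <b> bytes, <n> allocs",
// matching the print calls above.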
7870