src/runtime/mgcmark.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1-2 MB/ms, 128 KB bounds
	// scan preemption at ~100 us.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of scan work to
	// do between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 us. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values make
	// the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span
	// root at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	assertWorldStopped()

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}

		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're going to scan the whole heap (that was available at the
	// time the mark phase started, i.e. markArenas) for in-use
	// spans which have specials. Break up the work into arenas, and
	// further into chunks.
	//
	// Snapshot allArenas as markArenas. This snapshot is safe
	// because allArenas is append-only.
	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be caught by the write barrier.
	work.stackRoots = allGsSnapshot()
	work.nStackRoots = len(work.stackRoots)

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)

	// Calculate base indexes of each root type.
	work.baseData = uint32(fixedRootCount)
	work.baseBSS = work.baseData + uint32(work.nDataRoots)
	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
}
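
// To make the flat job index space concrete, consider a hypothetical
// cycle with 2 data shards, 1 BSS shard, 16 span shards, and 100 stack
// roots (illustrative numbers, not taken from a real program). With
// fixedRootCount = 2, markroot would decode a job index i as:
//
//	i = 0, 1       fixed roots (finalizers, free g stacks)
//	i = 2, 3       data shards 0-1    (baseData = 2)
//	i = 4          BSS shard 0        (baseBSS = 4)
//	i = 5 .. 20    span shards 0-15   (baseSpans = 5)
//	i = 21 .. 120  stack roots 0-99   (baseStacks = 21, baseEnd = 121)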

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	// Check that stacks have been scanned.
	//
	// We only check the first nStackRoots Gs that we should have
	// scanned. Since we don't care about newer Gs (see comment in
	// gcMarkRootPrepare), no locking is required.
	i := 0
	forEachGRace(func(gp *g) {
		if i >= work.nStackRoots {
			return
		}

		if !gp.gcscandone {
			println("gp", gp, "goid", gp.goid,
				"status", readgstatus(gp),
				"gcscandone", gp.gcscandone)
			throw("scan missed a g")
		}

		i++
	})
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// Returns the amount of GC work for this root. If flushBgCredit is
// true, that work is also flushed to the background scan work credit
// pool.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
	var workDone int64
	var workCounter *atomic.Int64
	switch {
	case work.baseData <= i && i < work.baseBSS:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
		}

	case work.baseBSS <= i && i < work.baseSpans:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case work.baseSpans <= i && i < work.baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-work.baseSpans))

	default:
		// The rest is scanning goroutine stacks.
		workCounter = &gcController.stackScanWork
		if i < work.baseStacks || work.baseEnd <= i {
			printlock()
			print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
			throw("markroot: bad index")
		}
		gp := work.stackRoots[i-work.baseStacks]

		// Remember when we've first observed the G blocked. This
		// is needed only for traceback output.
		status := readgstatus(gp) // We are not in a scan state.
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan)
			}

			// suspendG blocks (and spins) until gp stops,
			// which may take a while for running
			// goroutines.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			workDone += scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
	if workCounter != nil && workDone != 0 {
		workCounter.Add(workDone)
		if flushBgCredit {
			gcFlushBgCredit(workDone)
		}
	}
	return workDone
}

// markrootBlock scans the shard'th shard of the block of memory
// [b0, b0+n0), with the given pointer mask.
//
// Returns the amount of work done.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
	if rootBlockBytes%(8*goarch.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return 0
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
	return int64(n)
}
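
// To make the shard arithmetic above concrete: on a 64-bit platform
// (goarch.PtrSize = 8), each 256 KiB shard is described by
// 256Ki/(8*8) = 4096 ptrmask bytes, so shard k scans
// [b0+k*256Ki, b0+(k+1)*256Ki) against ptrmask0+k*4096. A hypothetical
// 600 KiB data segment therefore produces nBlocks = 3 root jobs
// covering 256 KiB, 256 KiB, and the remaining 88 KiB.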

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// Objects with weak handles have only one invariant related
	// to this function: weak handle specials (which are not in the
	// garbage collected heap) are roots. In practice, this means
	// the handle field must be scanned.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since
			// we're currently marking we can be sure that we don't
			// have to worry about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is
			// set, so sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				switch sp.kind {
				case _KindSpecialFinalizer:
					// Don't mark finalized object, but scan it so we
					// retain everything it points to.
					spf := (*specialfinalizer)(unsafe.Pointer(sp))
					// A finalizer can be set for an inner byte of an
					// object, find object beginning.
					p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

					// Mark everything that can be reached from
					// the object (but *not* the object itself or
					// we'll never collect it).
					if !s.spanclass.noscan() {
						scanobject(p, gcw)
					}

					// The special itself is a root.
					scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				case _KindSpecialWeakHandle:
					// The special itself is a root.
					spw := (*specialWeakHandle)(unsafe.Pointer(sp))
					scanblock(uintptr(unsafe.Pointer(&spw.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				}
			}
			unlock(&s.speciallock)
		}
	}
}
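
// For intuition about the shard-to-page mapping: assuming the common
// 64-bit configuration of 64 MiB arenas with 8 KiB pages
// (pagesPerArena = 8192), each arena contributes 8192/512 = 16 span
// root shards. Shard 21 then selects markArenas[21/16] = markArenas[1],
// covers arena pages 21*512%8192 = 2560 through 3071, and inspects
// 512/8 = 64 bytes of that arena's pageSpecials bitmap.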

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	// This boolean indicates whether we've entered mark assist from
	// the perspective of the tracer, *regardless* of whether tracing
	// is enabled. The tracer allows tracing to begin (and advance
	// generations) in the middle of a GC mark phase, so we record
	// this state so the tracer can pick it up and produce a
	// consistent trace.
	enteredMarkAssistForTracing := false
retry:
	if gcCPULimiter.limiting() {
		// If the CPU limiter is enabled, intentionally don't
		// assist to reduce the amount of CPU time spent in the GC.
		if enteredMarkAssistForTracing {
			trace := traceAcquire()
			if trace.ok() {
				trace.GCMarkAssistDone()
				// Set this *after* we trace the end to make sure
				// that we emit an in-progress event if this is
				// the first event for the goroutine in the trace
				// or trace generation. Also, do this between
				// acquire/release because this is part of the
				// goroutine's trace state, and it must be atomic
				// with respect to the tracer.
				gp.inMarkAssist = false
				traceRelease(trace)
			} else {
				// This state is tracked even if tracing isn't
				// enabled. See the comment on
				// enteredMarkAssistForTracing.
				gp.inMarkAssist = false
			}
		}
		return
	}

	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	assistWorkPerByte := gcController.assistWorkPerByte.Load()
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := gcController.bgScanCredit.Load()
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		gcController.bgScanCredit.Add(-stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if enteredMarkAssistForTracing {
				trace := traceAcquire()
				if trace.ok() {
					trace.GCMarkAssistDone()
					// Set this *after* we trace the end to make sure
					// that we emit an in-progress event if this is
					// the first event for the goroutine in the trace
					// or trace generation. Also, do this between
					// acquire/release because this is part of the
					// goroutine's trace state, and it must be atomic
					// with respect to the tracer.
					gp.inMarkAssist = false
					traceRelease(trace)
				} else {
					// This state is tracked even if tracing isn't
					// enabled. See the comment on
					// enteredMarkAssistForTracing.
					gp.inMarkAssist = false
				}
			}
			return
		}
	}
	if !enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistStart()
			// Set this *after* we trace the start, otherwise we may
			// emit an in-progress event for an assist we're about to
			// start.
			gp.inMarkAssist = true
			traceRelease(trace)
		} else {
			gp.inMarkAssist = true
		}
		// Set this if we ever pass this point, because we must
		// manage inMarkAssist correctly from here on.
		//
		// See the comment on enteredMarkAssistForTracing above.
		enteredMarkAssistForTracing = true
	}

	// Perform assist work.
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to pay off the assist debt. This may
		// happen if the GC is done before we're done scanning
		// (e.g., if the work queue dries up) or if we did less
		// work than requested.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// gcParkAssist may not actually park if the GC cycle
		// completed or more credit appeared in the meantime; in
		// that case it returns false and we retry.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistDone()
			// Set this *after* we trace the end to make sure
			// that we emit an in-progress event if this is
			// the first event for the goroutine in the trace
			// or trace generation. Also, do this between
			// acquire/release because this is part of the
			// goroutine's trace state, and it must be atomic
			// with respect to the tracer.
			gp.inMarkAssist = false
			traceRelease(trace)
		} else {
			// This state is tracked even if tracing isn't
			// enabled. See the comment on
			// enteredMarkAssistForTracing.
			gp.inMarkAssist = false
		}
	}
}
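
// A worked example of the debt conversion above (illustrative numbers,
// not measured ones): if assistWorkPerByte is 0.5 scan-work units per
// byte and gp has allocated 4096 bytes without credit
// (gcAssistBytes = -4096), then scanWork = 0.5*4096 = 2048. If that is
// below gcOverAssistWork, the assist rounds up to gcOverAssistWork
// units and recomputes debtBytes = assistBytesPerWork*scanWork, so the
// surplus work becomes allocation credit, amortizing assist entry
// costs over future allocations.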

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	//
	// Limiter event tracking might be disabled if we end up here
	// while on a mark worker.
	startTime := nanotime()
	trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casGToWaitingForGC(gp, _Grunning, waitReasonGCAssistMarking)

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	now := nanotime()
	duration := now - startTime
	pp := gp.m.p.ptr()
	pp.gcAssistTime += duration
	if trackLimiterEvent {
		pp.limiterEvent.stop(limiterEventMarkAssist, now)
	}
	if pp.gcAssistTime > gcAssistTimeSlack {
		gcController.assistTime.Add(pp.gcAssistTime)
		gcCPULimiter.update(now)
		pp.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and
// parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in the
	// queue, but can still back out. This avoids a race in case
	// background marking has flushed more credit since we checked
	// above.
	if gcController.bgScanCredit.Load() > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		gcController.bgScanCredit.Add(scanWork)
		return
	}

	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		gcController.bgScanCredit.Add(scanWork)
	}
	unlock(&work.assistQueue.lock)
}
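
// Walking through the distribution loop above with illustrative
// numbers: suppose assistBytesPerWork = 2 and scanWork = 1000, so
// scanBytes = 2000, and two goroutines are queued with debts of -1500
// bytes each. The first is fully satisfied (scanBytes drops to 500)
// and made runnable; the second is only partially paid (its debt
// shrinks to -1000), is pushed to the back of the queue, and the loop
// exits with nothing left over for bgScanCredit.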

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// Returns the amount of scan work performed, but doesn't update
// gcController.stackScanWork or flush any credit. Any background
// credit produced by this function should be flushed by its caller.
// scanstack itself can't safely flush because it may result in trying
// to wake up a goroutine that was just scanned, resulting in a
// self-deadlock.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) int64 {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return 0
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	// scannedSize is the amount of work we'll be reporting.
	//
	// It is less than the allocated size (which is hi-lo).
	var sp uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp // If in a system call, this is the stack pointer (gp.sched.sp can be 0).
	} else {
		sp = gp.sched.sp
	}
	scannedSize := gp.stack.hi - sp

	// Keep statistics for initial stack size calculation.
	// Note that this accumulates the scanned size, not the
	// allocated size.
	p := getg().m.p.ptr()
	p.scannedStackSize += uint64(scannedSize)
	p.scannedStacks++

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		scanframeworker(&u.frame, &state, gcw)
	}

	// Find additional pointers that point into the stack from the
	// heap. Currently this includes defers and panics.

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// Scan the func value, which could be a stack
			// allocated closure.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record
			// might point to a heap-allocated defer record.
			// Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain defer records themselves. Defer records might not
		// be reachable from the G through regular heap tracing
		// because the defer linked list might weave between the
		// stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		r := obj.r
		if r == nil {
			// We've already scanned this object.
			continue
		}
		obj.setRecord(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		gcdata := r.gcdata()
		var s *mspan
		if r.useGCProg() {
			// This path is pretty unlikely, an object large
			// enough to have a GC program allocated on the
			// stack. We need some space to unpack the program
			// into a straight bitmask, which we allocate/free
			// here.
			s = materializeGCProg(r.ptrdata(), gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
		} else {
			scanblock(b, r.ptrdata(), gcdata, gcw, &state)
		}

		if s != nil {
			dematerializeGCProg(s)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for i := 0; i < x.nobj; i++ {
				obj := &x.obj[i]
				if obj.r == nil {
					continue
				}
				println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
				// Note: not necessarily really dead - only
				// reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
	return int64(scannedSize)
}

// Scan a stack frame: local variables and function arguments/results.
//
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == abi.FuncID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == abi.FuncID_debugCallV2
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if n := frame.argBytes(); n != 0 {
			scanConservative(frame.argp, n, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only need to scan the frames of
			// asyncPreempt conservatively; everything
			// below them is precise.
			state.conservative = false
		}
		return
	}

	locals, args, objs := frame.getStackMap(false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args,
		// either. (And all args would be scanned above anyway.)
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of size", obj.size)
			}
			state.addObject(ptr, obj)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better
// account mark time in the profiler.
func gcDrainMarkWorkerIdle(gcw *gcWork) {
	gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}

// gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to
// better account mark time in the profiler.
func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool) {
	flags := gcDrainFlushBgCredit
	if untilPreempt {
		flags |= gcDrainUntilPreempt
	}
	gcDrain(gcw, flags)
}

// gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to
// better account mark time in the profiler.
func gcDrainMarkWorkerFractional(gcw *gcWork) {
	gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW or forEachP.
//
// Disabling write barriers is necessary to ensure that after we've
// confirmed that we've drained gcw, that we don't accidentally end
// up flipping that condition by immediately adding work in the form
// of a write barrier buffer flush.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.enabled {
		throw("gcDrain phase incorrect")
	}

	// N.B. We must be running in a non-preemptible context, so it's
	// safe to hold a reference to our P here.
	gp := getg().m.curg
	pp := gp.m.p.ptr()
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.heapScanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preemptible, if someone wants to STW, or
		// if someone is calling forEachP.
		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job, flushBgCredit)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	//
	// Stop if we're preemptible, if someone wants to STW, or if
	// someone is calling forEachP.
	for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
		// Try to keep work available on the global queue. It's
		// better to just keep work available than to make workers
		// wait. In the worst case, we'll do O(log(_WorkbufSize))
		// unnecessary balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush()
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.heapScanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.heapScanWork
			gcw.heapScanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.heapScanWork > 0 {
		gcController.heapScanWork.Add(gcw.heapScanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.heapScanWork - initScanWork)
		}
		gcw.heapScanWork = 0
	}
}
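
// For a sense of the self-preemption cadence: an idle or fractional
// worker re-runs its check function after roughly drainCheckThreshold
// (100000) units of scan work, which at an assumed scan rate of about
// 1 MB/ms corresponds to checking every ~100 us. Dedicated workers
// start checkWork at 1<<63 - 1 with a nil check function and so never
// take that path.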

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.enabled {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.heapScanWork

	// In addition to backing out because of a preemption, back out
	// if the GC CPU limiter is enabled.
	gp := getg().m.curg
	for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush()
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					workFlushed += markroot(gcw, job, false)
					continue
				}
			}
			// No heap or root jobs.
			break
		}

		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			workFlushed += gcw.heapScanWork
			gcw.heapScanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.
	return workFlushed + gcw.heapScanWork
}

// scanblock scans b as scanstack would, but using an explicit
// pointer bitmap instead of the frame bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.heapScanWork.
//
// If stk != nil, possible stack pointers are also reported to
// stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		if bits == 0 {
			i += goarch.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += goarch.PtrSize
		}
	}
}
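
// The ptrmask encoding consumed above is one bit per pointer-sized
// word, least-significant bit first within each byte. For a
// hypothetical 64-bit global
//
//	var g struct {
//		p *int // word 0: pointer
//		x int  // word 1: scalar
//		q *int // word 2: pointer
//	}
//
// the mask byte would be 0b101: scanblock loads and greys the words
// at offsets 0 and 16 and skips the scalar at offset 8.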

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Prefetch object before we scan it.
	//
	// This will overlap fetching the beginning of the object with
	// initial setup before we start scanning the object.
	sys.Prefetch(b)

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}
	if s.spanclass.noscan() {
		// Correctness-wise this is ok, but it's inefficient
		// if noscan objects reach here.
		throw("scanobject of a noscan object")
	}

	var tp typePointers
	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		n = min(n, maxObletBytes)
		tp = s.typePointersOfUnchecked(s.base())
		tp = tp.fastForward(b-tp.addr, b+n)
	} else {
		tp = s.typePointersOfUnchecked(b)
	}

	var scanSize uintptr
	for {
		var addr uintptr
		if tp, addr = tp.nextFast(); addr == 0 {
			if tp, addr = tp.next(b + n); addr == 0 {
				break
			}
		}

		// Keep track of farthest pointer we found, so we can
		// update heapScanWork.
		scanSize = addr - b + goarch.PtrSize

		// Work here is duplicated in scanblock.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(addr))

		// At this point we have extracted the next potential
		// pointer. Quickly filter out nil and pointers back to
		// the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
				greyobject(obj, b, addr-b, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.heapScanWork += int64(scanSize)
}
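
// To see the oblet splitting above at work: scanning the base of a
// hypothetical 1 MiB object enqueues seven additional oblets at
// base+128Ki, base+256Ki, ..., base+896Ki, then scans only the first
// 128 KiB itself. Each enqueued oblet later re-enters scanobject and
// scans its own window, so no single call scans more than
// maxObletBytes and other workers can pick up the remaining oblets in
// parallel.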

// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / goarch.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += goarch.PtrSize {
		if ptrmask != nil {
			word := i / goarch.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do
				// the 8th).
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(goarch.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += goarch.PtrSize*8 - goarch.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead. Hence, if all
			// pointers to this object are from
			// conservative scanning, we have to scan it
			// defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least
	// pointer-aligned.
	if obj&(goarch.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// We're adding obj to P's local workbuf, so it's likely
	// this object will be processed soon by the same P.
	// Even if the workbuf gets flushed, there will likely still be
	// some benefit on platforms with inclusive shared caches.
	sys.Prefetch(obj)
	// Queue the obj for scanning.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + goarch.PtrSize
	}
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	if gcphase == _GCmarktermination {
		// Check this here instead of on the hot path.
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(span.elemsize)
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}