Source file
src/runtime/mbitmap.go
// Garbage collector: type and heap bitmaps.
//
// This file implements the data structures that record, for every
// pointer-sized word of allocated memory, whether that word holds a
// pointer. Small objects (up to minSizeForMallocHeader bytes) keep a
// 1-bit-per-word bitmap at the end of their span; larger objects carry
// a malloc header, a single *_type stored in the first word of the
// allocation, from which the pointer bitmap is derived on demand.
// Global data and BSS are described by separate 1-bit bitmaps
// (gcdatamask and gcbssmask) consulted by the bulk write barriers below.
56 package runtime
57
58 import (
59 "internal/abi"
60 "internal/goarch"
61 "internal/runtime/atomic"
62 "runtime/internal/sys"
63 "unsafe"
64 )
65
66 const (
// The malloc header is a single pointer to the allocation's type,
// stored in the first mallocHeaderSize bytes of the object.
71 mallocHeaderSize = 8
// minSizeForMallocHeader is the size threshold for using a malloc header:
// objects no larger than this store their pointer bitmap directly in the
// span (see heapBitsInSpan), so a single uintptr mask of ptrBits bits
// covers the whole object. Larger objects store a *_type header at the
// start of the allocation instead.
101 minSizeForMallocHeader = goarch.PtrSize * ptrBits
102 )
103
// heapBitsInSpan reports whether objects of the given user-requested size
// keep their pointer/scalar bitmap at the end of the span, rather than
// using a malloc header.
112 func heapBitsInSpan(userSize uintptr) bool {
113
114
115 return userSize <= minSizeForMallocHeader
116 }
117
// typePointers is an iterator over the pointer bits of a range of memory
// in a single span. For small objects the bits come straight from the
// span's inline bitmap; for objects with a header, the type's GCData is
// consulted and replicated across the elements of the allocation.
122 type typePointers struct {
// elem is the address of the current type element: the start of the
// object, or of the current array element within it.
126 elem uintptr
// addr is the address that bit 0 of mask describes; pointers are reported
// as addr plus the bit index times the pointer size.
130 addr uintptr
// mask is a bitmask with one bit per pointer-sized word starting at addr;
// a set bit means that word holds a pointer. Bits are consumed (cleared)
// as the iterator advances.
136 mask uintptr
// typ is the type whose GCData refills mask as the iterator advances.
// It is nil when the whole bitmap already fits in mask (small objects).
140 typ *_type
141 }
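// A typical iteration, following the pattern used by bulkBarrierPreWrite
// and getgcmask below, looks like:
//
//	tp := span.typePointersOf(addr, size)
//	for {
//		var p uintptr
//		if tp, p = tp.next(addr + size); p == 0 {
//			break
//		}
//		// p is the address of a pointer-containing word.
//	}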
142
// typePointersOf returns an iterator over all heap pointers in the memory
// range [addr, addr+size) within the span. addr and size must be
// pointer-aligned.
154 func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
155 base := span.objBase(addr)
156 tp := span.typePointersOfUnchecked(base)
157 if base == addr && size == span.elemsize {
158 return tp
159 }
160 return tp.fastForward(addr-tp.addr, addr+size)
161 }
// typePointersOfUnchecked is like typePointersOf, but assumes addr is the
// base address of an object in the span and iterates over the entire
// object, skipping the bounds adjustments done by typePointersOf.
171 func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
172 const doubleCheck = false
173 if doubleCheck && span.objBase(addr) != addr {
174 print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
175 throw("typePointersOfUnchecked consisting of non-base-address for object")
176 }
177
178 spc := span.spanclass
179 if spc.noscan() {
180 return typePointers{}
181 }
182 if heapBitsInSpan(span.elemsize) {
183
184 return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
185 }
186
187
188 var typ *_type
189 if spc.sizeclass() != 0 {
190
191 typ = *(**_type)(unsafe.Pointer(addr))
192 addr += mallocHeaderSize
193 } else {
194 typ = span.largeType
195 if typ == nil {
196
197 return typePointers{}
198 }
199 }
200 gcdata := typ.GCData
201 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
202 }
// typePointersOfType is like typePointersOf, but assumes the memory at
// addr is an allocation of type typ. typ must not be nil and must not use
// a GC program.
214 func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
215 const doubleCheck = false
216 if doubleCheck && (typ == nil || typ.Kind_&abi.KindGCProg != 0) {
217 throw("bad type passed to typePointersOfType")
218 }
219 if span.spanclass.noscan() {
220 return typePointers{}
221 }
222
223 gcdata := typ.GCData
224 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
225 }
// nextFast is the fast path of next: it consumes the lowest set bit of
// tp.mask and returns the updated iterator together with the address of
// the corresponding pointer word, or 0 if the mask is empty. Callers fall
// back to next when the mask runs out.
247 func (tp typePointers) nextFast() (typePointers, uintptr) {
248
249 if tp.mask == 0 {
250 return tp, 0
251 }
252
253 var i int
254 if goarch.PtrSize == 8 {
255 i = sys.TrailingZeros64(uint64(tp.mask))
256 } else {
257 i = sys.TrailingZeros32(uint32(tp.mask))
258 }
259
260 tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
261
262 return tp, tp.addr + uintptr(i)*goarch.PtrSize
263 }
// next returns the address of the next pointer word in the iteration, or
// 0 if there are no more pointers, along with the updated iterator. limit
// is the exclusive upper bound of the memory range being iterated and
// must be pointer-aligned.
273 func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
274 for {
275 if tp.mask != 0 {
276 return tp.nextFast()
277 }
// Stop if there is no type information to refill the mask from.
280 if tp.typ == nil {
281 return typePointers{}, 0
282 }
// If the next bitmap word would start at or past the end of this
// element's pointer data, move to the next element; otherwise advance
// to the next bitmap word.
285 if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
286 tp.elem += tp.typ.Size_
287 tp.addr = tp.elem
288 } else {
289 tp.addr += ptrBits * goarch.PtrSize
290 }
291
292
293 if tp.addr >= limit {
294 return typePointers{}, 0
295 }
296
297
298 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
299 if tp.addr+goarch.PtrSize*ptrBits > limit {
300 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
301 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
302 }
303 }
304 }
// fastForward advances the iterator by n bytes and truncates it at limit
// (an exclusive upper bound on addresses to report), refilling the mask
// from the type bitmap as needed. It is how typePointersOf starts an
// iteration in the middle of an object.
313 func (tp typePointers) fastForward(n, limit uintptr) typePointers {
314
315 target := tp.addr + n
316 if target >= limit {
317 return typePointers{}
318 }
319 if tp.typ == nil {
320
321
322 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
323
324 if tp.addr+goarch.PtrSize*ptrBits > limit {
325 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
326 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
327 }
328 return tp
329 }
330
331
332
333 if n >= tp.typ.Size_ {
334
335
336 oldelem := tp.elem
337 tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
338 tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
339 } else {
340 tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
341 }
342
343 if tp.addr-tp.elem >= tp.typ.PtrBytes {
344
345
346 tp.elem += tp.typ.Size_
347 tp.addr = tp.elem
348 tp.mask = readUintptr(tp.typ.GCData)
349
350
351 if tp.addr >= limit {
352 return typePointers{}
353 }
354 } else {
355
356
357 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
358 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
359 }
360 if tp.addr+goarch.PtrSize*ptrBits > limit {
361 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
362 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
363 }
364 return tp
365 }
// objBase returns the base address of the object containing addr. addr
// must point into an object in the span.
372 func (span *mspan) objBase(addr uintptr) uintptr {
373 return span.base() + span.objIndex(addr)*span.elemsize
374 }
// bulkBarrierPreWrite executes a write barrier for every pointer slot in
// the memory range [dst, dst+size), queuing the pointer values currently
// stored at those slots and, if src != 0, the values about to be copied
// in from the corresponding slots of src. It is used before bulk copies
// such as typedmemmove. dst, src, and size must be pointer-aligned.
//
// The destination may be heap memory, global data, or BSS; globals are
// handled via the module's 1-bit pointer bitmaps. If typ is non-nil and
// has no GC program, it is used to locate pointer slots directly, which
// is cheaper than consulting the span's type information.
418 func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
419 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
420 throw("bulkBarrierPreWrite: unaligned arguments")
421 }
422 if !writeBarrier.enabled {
423 return
424 }
425 s := spanOf(dst)
426 if s == nil {
// If dst is a global variable, use the data or BSS bitmaps to find the
// pointer slots.
429 for _, datap := range activeModules() {
430 if datap.data <= dst && dst < datap.edata {
431 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
432 return
433 }
434 }
435 for _, datap := range activeModules() {
436 if datap.bss <= dst && dst < datap.ebss {
437 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
438 return
439 }
440 }
441 return
442 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
// dst does not point into an in-use heap object (the span is not in use,
// or dst falls outside the span's object range), so there are no heap
// pointer slots to record here.
449 return
450 }
451 buf := &getg().m.p.ptr().wbBuf
452
453
454 const doubleCheck = false
455 if doubleCheck {
456 doubleCheckTypePointersOfType(s, typ, dst, size)
457 }
458
459 var tp typePointers
460 if typ != nil && typ.Kind_&abi.KindGCProg == 0 {
461 tp = s.typePointersOfType(typ, dst)
462 } else {
463 tp = s.typePointersOf(dst, size)
464 }
465 if src == 0 {
466 for {
467 var addr uintptr
468 if tp, addr = tp.next(dst + size); addr == 0 {
469 break
470 }
471 dstx := (*uintptr)(unsafe.Pointer(addr))
472 p := buf.get1()
473 p[0] = *dstx
474 }
475 } else {
476 for {
477 var addr uintptr
478 if tp, addr = tp.next(dst + size); addr == 0 {
479 break
480 }
481 dstx := (*uintptr)(unsafe.Pointer(addr))
482 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
483 p := buf.get2()
484 p[0] = *dstx
485 p[1] = *srcx
486 }
487 }
488 }
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite, but it only
// enqueues the pointer values read from src. It is used when the
// destination slots are known to hold no pointers yet (for example,
// freshly zeroed memory), so their old contents need not be recorded.
// dst, src, and size must be pointer-aligned.
504 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
505 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
506 throw("bulkBarrierPreWrite: unaligned arguments")
507 }
508 if !writeBarrier.enabled {
509 return
510 }
511 buf := &getg().m.p.ptr().wbBuf
512 s := spanOf(dst)
513
514
515 const doubleCheck = false
516 if doubleCheck {
517 doubleCheckTypePointersOfType(s, typ, dst, size)
518 }
519
520 var tp typePointers
521 if typ != nil && typ.Kind_&abi.KindGCProg == 0 {
522 tp = s.typePointersOfType(typ, dst)
523 } else {
524 tp = s.typePointersOf(dst, size)
525 }
526 for {
527 var addr uintptr
528 if tp, addr = tp.next(dst + size); addr == 0 {
529 break
530 }
531 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
532 p := buf.get1()
533 p[0] = *srcx
534 }
535 }
// initHeapBits prepares the span's inline heap bitmap for use, clearing
// it for spans that store pointer bits in-span and for user arena chunks.
542 func (s *mspan) initHeapBits(forceClear bool) {
543 if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
544 b := s.heapBits()
545 clear(b)
546 }
547 }
// heapBits returns the span's inline pointer bitmap as a slice of uintptr
// words. The bitmap occupies the tail of the span's memory and is only
// valid for spans whose objects store their heap bits in-span (see
// heapBitsInSpan) and for user arena chunks.
563 func (span *mspan) heapBits() []uintptr {
564 const doubleCheck = false
565
566 if doubleCheck && !span.isUserArenaChunk {
567 if span.spanclass.noscan() {
568 throw("heapBits called for noscan")
569 }
570 if span.elemsize > minSizeForMallocHeader {
571 throw("heapBits called for span class that should have a malloc header")
572 }
573 }
// Nearly every span with inline heap bits is exactly one page in size;
// special-case that so the span size below is a constant.
577 if span.npages == 1 {
578
579 return heapBitsSlice(span.base(), pageSize)
580 }
581 return heapBitsSlice(span.base(), span.npages*pageSize)
582 }
583
// heapBitsSlice returns the inline heap bitmap for a span with the given
// base address and size, without any validity checks.
587 func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
588 bitmapSize := spanSize / goarch.PtrSize / 8
589 elems := int(bitmapSize / goarch.PtrSize)
590 var sl notInHeapSlice
591 sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
592 return *(*[]uintptr)(unsafe.Pointer(&sl))
593 }
// heapBitsSmallForAddr loads the pointer bits for the small object at
// addr from the span's inline bitmap. Bit i of the result corresponds to
// the i'th pointer-sized word of the object.
601 func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
602 spanSize := span.npages * pageSize
603 bitmapSize := spanSize / goarch.PtrSize / 8
604 hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
// The object's bits may straddle two bitmap words. i is the index of the
// bitmap word holding the object's first bit and j is the bit offset
// within that word; stitch together up to two words below.
614 i := (addr - span.base()) / goarch.PtrSize / ptrBits
615 j := (addr - span.base()) / goarch.PtrSize % ptrBits
616 bits := span.elemsize / goarch.PtrSize
617 word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
618 word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
619
620 var read uintptr
621 if j+bits > ptrBits {
622
623 bits0 := ptrBits - j
624 bits1 := bits - bits0
625 read = *word0 >> j
626 read |= (*word1 & ((1 << bits1) - 1)) << bits0
627 } else {
628
629 read = (*word0 >> j) & ((1 << bits) - 1)
630 }
631 return read
632 }
633
// writeHeapBitsSmall writes the pointer bitmap for a small object of type
// typ allocated at x, replicating the type's bitmap across the dataSize
// bytes of the allocation. It returns the number of bytes of the object
// the garbage collector needs to scan.
641 func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
// The objects here are small enough that the type's bitmap always fits
// in a single uintptr load.
643 src0 := readUintptr(typ.GCData)
644
645
646 bits := span.elemsize / goarch.PtrSize
647 scanSize = typ.PtrBytes
648 src := src0
649 switch typ.Size_ {
650 case goarch.PtrSize:
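// The element is a single pointer word, so every word of the dataSize
// bytes is a pointer: build the mask directly.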
651 src = (1 << (dataSize / goarch.PtrSize)) - 1
652 default:
653 for i := typ.Size_; i < dataSize; i += typ.Size_ {
654 src |= src0 << (i / goarch.PtrSize)
655 scanSize += typ.Size_
656 }
657 }
// Since the object's bits fit in a single uintptr, the store below
// touches at most two bitmap words.
661 dst := span.heapBits()
662 o := (x - span.base()) / goarch.PtrSize
663 i := o / ptrBits
664 j := o % ptrBits
665 if j+bits > ptrBits {
666
667 bits0 := ptrBits - j
668 bits1 := bits - bits0
669 dst[i+0] = dst[i+0]&(^uintptr(0)>>bits0) | (src << j)
670 dst[i+1] = dst[i+1]&^((1<<bits1)-1) | (src >> bits0)
671 } else {
672
673 dst[i] = (dst[i] &^ (((1 << bits) - 1) << j)) | (src << j)
674 }
675
676 const doubleCheck = false
677 if doubleCheck {
678 srcRead := span.heapBitsSmallForAddr(x)
679 if srcRead != src {
680 print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
681 print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
682 print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
683 throw("bad pointer bits written for small object")
684 }
685 }
686 return
687 }
688
// heapSetType records the type information for an allocation of dataSize
// bytes of type typ at address x in span. If header is nil, the object is
// small and its pointer bits are written directly into the span's inline
// bitmap; otherwise *header is pointed at the type, with any GC program
// first unrolled into an out-of-band bitmap held in a manually managed
// span. It returns the number of bytes of the object that must be scanned.
705 func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
706 const doubleCheck = false
707
708 gctyp := typ
709 if header == nil {
710 if doubleCheck && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
711 throw("tried to write heap bits, but no heap bits in span")
712 }
713
714 scanSize = span.writeHeapBitsSmall(x, dataSize, typ)
715 } else {
716 if typ.Kind_&abi.KindGCProg != 0 {
// The type uses a GC program. Unroll it into a plain bitmap stored in a
// manually managed span, laid out as a header _type followed by the bits.
720 if span.spanclass.sizeclass() != 0 {
721 throw("GCProg for type that isn't large")
722 }
723 spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
724 heapBitsOff := spaceNeeded
725 spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
726 npages := alignUp(spaceNeeded, pageSize) / pageSize
727 var progSpan *mspan
728 systemstack(func() {
729 progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits)
730 memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
731 })
// Build a throwaway _type in the new span whose GCData points at the
// unrolled bitmap stored immediately after it.
736 gctyp = (*_type)(unsafe.Pointer(progSpan.base()))
737 gctyp.Size_ = typ.Size_
738 gctyp.PtrBytes = typ.PtrBytes
739 gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff))
740 gctyp.TFlag = abi.TFlagUnrolledBitmap
// Expand the GC program (skipping its 4-byte header) into the bitmap.
743 runGCProg(addb(typ.GCData, 4), gctyp.GCData)
744 }
745
746
747 *header = gctyp
748 scanSize = span.elemsize
749 }
750
751 if doubleCheck {
752 doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
753
754
755
756
757 maxIterBytes := span.elemsize
758 if header == nil {
759 maxIterBytes = dataSize
760 }
761 off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
762 size := dataSize - off
763 if size == 0 {
764 off -= goarch.PtrSize
765 size += goarch.PtrSize
766 }
767 interior := x + off
768 size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
769 if size == 0 {
770 size = goarch.PtrSize
771 }
772
773 size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
774 if interior+size > x+maxIterBytes {
775 size = x + maxIterBytes - interior
776 }
777 doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
778 }
779 return
780 }
781
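// doubleCheckHeapPointers verifies that iterating the object at x with
// typePointersOfUnchecked reports exactly the pointer words implied by
// typ's bitmap, throwing if they disagree.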
782 func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
783
784 tp := span.typePointersOfUnchecked(span.objBase(x))
785 maxIterBytes := span.elemsize
786 if header == nil {
787 maxIterBytes = dataSize
788 }
789 bad := false
790 for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
791
792 want := false
793 if i < span.elemsize {
794 off := i % typ.Size_
795 if off < typ.PtrBytes {
796 j := off / goarch.PtrSize
797 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
798 }
799 }
800 if want {
801 var addr uintptr
802 tp, addr = tp.next(x + span.elemsize)
803 if addr == 0 {
804 println("runtime: found bad iterator")
805 }
806 if addr != x+i {
807 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
808 bad = true
809 }
810 }
811 }
812 if !bad {
813 var addr uintptr
814 tp, addr = tp.next(x + span.elemsize)
815 if addr == 0 {
816 return
817 }
818 println("runtime: extra pointer:", hex(addr))
819 }
820 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&abi.KindGCProg != 0, "\n")
821 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
822 print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
823 print("runtime: limit=", hex(x+span.elemsize), "\n")
824 tp = span.typePointersOfUnchecked(x)
825 dumpTypePointers(tp)
826 for {
827 var addr uintptr
828 if tp, addr = tp.next(x + span.elemsize); addr == 0 {
829 println("runtime: would've stopped here")
830 dumpTypePointers(tp)
831 break
832 }
833 print("runtime: addr=", hex(addr), "\n")
834 dumpTypePointers(tp)
835 }
836 throw("heapSetType: pointer entry not correct")
837 }
838
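// doubleCheckHeapPointersInterior is like doubleCheckHeapPointers, but
// starts iteration from an interior pointer at the given offset and size.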
839 func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
840 bad := false
841 if interior < x {
842 print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
843 throw("found bad interior pointer")
844 }
845 off := interior - x
846 tp := span.typePointersOf(interior, size)
847 for i := off; i < off+size; i += goarch.PtrSize {
848
849 want := false
850 if i < span.elemsize {
851 off := i % typ.Size_
852 if off < typ.PtrBytes {
853 j := off / goarch.PtrSize
854 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
855 }
856 }
857 if want {
858 var addr uintptr
859 tp, addr = tp.next(interior + size)
860 if addr == 0 {
861 println("runtime: found bad iterator")
862 bad = true
863 }
864 if addr != x+i {
865 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
866 bad = true
867 }
868 }
869 }
870 if !bad {
871 var addr uintptr
872 tp, addr = tp.next(interior + size)
873 if addr == 0 {
874 return
875 }
876 println("runtime: extra pointer:", hex(addr))
877 }
878 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
879 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
880 print("runtime: limit=", hex(interior+size), "\n")
881 tp = span.typePointersOf(interior, size)
882 dumpTypePointers(tp)
883 for {
884 var addr uintptr
885 if tp, addr = tp.next(interior + size); addr == 0 {
886 println("runtime: would've stopped here")
887 dumpTypePointers(tp)
888 break
889 }
890 print("runtime: addr=", hex(addr), "\n")
891 dumpTypePointers(tp)
892 }
893
894 print("runtime: want: ")
895 for i := off; i < off+size; i += goarch.PtrSize {
896
897 want := false
898 if i < dataSize {
899 off := i % typ.Size_
900 if off < typ.PtrBytes {
901 j := off / goarch.PtrSize
902 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
903 }
904 }
905 if want {
906 print("1")
907 } else {
908 print("0")
909 }
910 }
911 println()
912
913 throw("heapSetType: pointer entry not correct")
914 }
// doubleCheckTypePointersOfType checks that iterating with the type-based
// iterator and with the span-based iterator reports the same pointers.
917 func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
918 if typ == nil || typ.Kind_&abi.KindGCProg != 0 {
919 return
920 }
921 if typ.Kind_&abi.KindMask == abi.Interface {
922
923
924
925 return
926 }
927 tp0 := s.typePointersOfType(typ, addr)
928 tp1 := s.typePointersOf(addr, size)
929 failed := false
930 for {
931 var addr0, addr1 uintptr
932 tp0, addr0 = tp0.next(addr + size)
933 tp1, addr1 = tp1.next(addr + size)
934 if addr0 != addr1 {
935 failed = true
936 break
937 }
938 if addr0 == 0 {
939 break
940 }
941 }
942 if failed {
943 tp0 := s.typePointersOfType(typ, addr)
944 tp1 := s.typePointersOf(addr, size)
945 print("runtime: addr=", hex(addr), " size=", size, "\n")
946 print("runtime: type=", toRType(typ).string(), "\n")
947 dumpTypePointers(tp0)
948 dumpTypePointers(tp1)
949 for {
950 var addr0, addr1 uintptr
951 tp0, addr0 = tp0.next(addr + size)
952 tp1, addr1 = tp1.next(addr + size)
953 print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
954 if addr0 == 0 && addr1 == 0 {
955 break
956 }
957 }
958 throw("mismatch between typePointersOfType and typePointersOf")
959 }
960 }
961
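// dumpTypePointers prints the state of a typePointers iterator, for
// debugging.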
962 func dumpTypePointers(tp typePointers) {
963 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
964 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
965 for i := uintptr(0); i < ptrBits; i++ {
966 if tp.mask&(uintptr(1)<<i) != 0 {
967 print("1")
968 } else {
969 print("0")
970 }
971 }
972 println()
973 }
974
// addb returns the byte pointer p+n.
979 func addb(p *byte, n uintptr) *byte {
980
981
982
983 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
984 }
985
// subtractb returns the byte pointer p-n.
990 func subtractb(p *byte, n uintptr) *byte {
991
992
993
994 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
995 }
996
// add1 returns the byte pointer p+1.
1001 func add1(p *byte) *byte {
1002
1003
1004
1005 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
1006 }
1007
// subtract1 returns the byte pointer p-1.
1014 func subtract1(p *byte) *byte {
1015
1016
1017
1018 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
1019 }
1020
// markBits provides access to a single mark (or alloc) bit: a pointer to
// the byte holding the bit, a mask selecting the bit within that byte,
// and the object index the bit describes.
1030 type markBits struct {
1031 bytep *uint8
1032 mask uint8
1033 index uintptr
1034 }
1035
// allocBitsForIndex returns a markBits for the given object's allocation bit.
1037 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
1038 bytep, mask := s.allocBits.bitp(allocBitIndex)
1039 return markBits{bytep, mask, allocBitIndex}
1040 }
1041
// refillAllocCache reloads the 64-bit allocCache from the 8 bytes of
// allocBits starting at byte whichByte, inverting them so that a 1 in the
// cache means a free object slot.
1046 func (s *mspan) refillAllocCache(whichByte uint16) {
1047 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
1048 aCache := uint64(0)
1049 aCache |= uint64(bytes[0])
1050 aCache |= uint64(bytes[1]) << (1 * 8)
1051 aCache |= uint64(bytes[2]) << (2 * 8)
1052 aCache |= uint64(bytes[3]) << (3 * 8)
1053 aCache |= uint64(bytes[4]) << (4 * 8)
1054 aCache |= uint64(bytes[5]) << (5 * 8)
1055 aCache |= uint64(bytes[6]) << (6 * 8)
1056 aCache |= uint64(bytes[7]) << (7 * 8)
1057 s.allocCache = ^aCache
1058 }
1059
// nextFreeIndex returns the index of the next free object in the span at
// or after s.freeindex, or s.nelems if there are no more free objects.
// It updates s.freeindex and the allocCache as it goes.
1064 func (s *mspan) nextFreeIndex() uint16 {
1065 sfreeindex := s.freeindex
1066 snelems := s.nelems
1067 if sfreeindex == snelems {
1068 return sfreeindex
1069 }
1070 if sfreeindex > snelems {
1071 throw("s.freeindex > s.nelems")
1072 }
1073
1074 aCache := s.allocCache
1075
1076 bitIndex := sys.TrailingZeros64(aCache)
1077 for bitIndex == 64 {
1078
1079 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
1080 if sfreeindex >= snelems {
1081 s.freeindex = snelems
1082 return snelems
1083 }
1084 whichByte := sfreeindex / 8
1085
1086 s.refillAllocCache(whichByte)
1087 aCache = s.allocCache
1088 bitIndex = sys.TrailingZeros64(aCache)
1089
1090
1091 }
1092 result := sfreeindex + uint16(bitIndex)
1093 if result >= snelems {
1094 s.freeindex = snelems
1095 return snelems
1096 }
1097
1098 s.allocCache >>= uint(bitIndex + 1)
1099 sfreeindex = result + 1
1100
1101 if sfreeindex%64 == 0 && sfreeindex != snelems {
// We just crossed a 64-object boundary and there are more objects, so
// refill the allocCache from the next 8 bytes of allocBits.
1107 whichByte := sfreeindex / 8
1108 s.refillAllocCache(whichByte)
1109 }
1110 s.freeindex = sfreeindex
1111 return result
1112 }
1113
// isFree reports whether the index'th object in s is unallocated.
// Objects below freeIndexForScan are always treated as allocated.
1119 func (s *mspan) isFree(index uintptr) bool {
1120 if index < uintptr(s.freeIndexForScan) {
1121 return false
1122 }
1123 bytep, mask := s.allocBits.bitp(index)
1124 return *bytep&mask == 0
1125 }
1126
// divideByElemSize computes n / s.elemsize using the span's precomputed
// reciprocal multiplier (divMul), avoiding a hardware divide.
1135 func (s *mspan) divideByElemSize(n uintptr) uintptr {
1136 const doubleCheck = false
1137
1138
1139 q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
1140
1141 if doubleCheck && q != n/s.elemsize {
1142 println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
1143 throw("bad magic division")
1144 }
1145 return q
1146 }
1147
// objIndex returns the index of the object containing the address p
// within the span.
1151 func (s *mspan) objIndex(p uintptr) uintptr {
1152 return s.divideByElemSize(p - s.base())
1153 }
1154
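// markBitsForAddr returns the markBits for the object containing the
// address p.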
1155 func markBitsForAddr(p uintptr) markBits {
1156 s := spanOf(p)
1157 objIndex := s.objIndex(p)
1158 return s.markBitsForIndex(objIndex)
1159 }
1160
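// markBitsForIndex returns the markBits for the objIndex'th object in
// the span.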
1161 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
1162 bytep, mask := s.gcmarkBits.bitp(objIndex)
1163 return markBits{bytep, mask, objIndex}
1164 }
1165
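// markBitsForBase returns the markBits for the first object in the span.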
1166 func (s *mspan) markBitsForBase() markBits {
1167 return markBits{&s.gcmarkBits.x, uint8(1), 0}
1168 }
1169
// isMarked reports whether mark bit m is set.
1171 func (m markBits) isMarked() bool {
1172 return *m.bytep&m.mask != 0
1173 }
1174
// setMarked sets the marked bit in the markbits, atomically.
1176 func (m markBits) setMarked() {
1177
1178
1179
1180 atomic.Or8(m.bytep, m.mask)
1181 }
1182
// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
1184 func (m markBits) setMarkedNonAtomic() {
1185 *m.bytep |= m.mask
1186 }
1187
// clearMarked clears the marked bit in the markbits, atomically.
1189 func (m markBits) clearMarked() {
1190
1191
1192
1193 atomic.And8(m.bytep, ^m.mask)
1194 }
1195
// markBitsForSpan returns the markBits for the span base address base.
1197 func markBitsForSpan(base uintptr) (mbits markBits) {
1198 mbits = markBitsForAddr(base)
1199 if mbits.mask != 1 {
1200 throw("markBitsForSpan: unaligned start")
1201 }
1202 return mbits
1203 }
1204
// advance advances the markBits to the next object in the span.
1206 func (m *markBits) advance() {
1207 if m.mask == 1<<7 {
1208 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
1209 m.mask = 1
1210 } else {
1211 m.mask = m.mask << 1
1212 }
1213 m.index++
1214 }
1215
// clobberdeadPtr is a special value used by the compiler to clobber dead
// stack slots when the -clobberdead flag is set.
1218 const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
1219
// badPointer prints diagnostics for an invalid heap pointer and throws.
1221 func badPointer(s *mspan, p, refBase, refOff uintptr) {
1222
1223
1224
1225
1226
1227
1228
1229
1230 printlock()
1231 print("runtime: pointer ", hex(p))
1232 if s != nil {
1233 state := s.state.get()
1234 if state != mSpanInUse {
1235 print(" to unallocated span")
1236 } else {
1237 print(" to unused region of span")
1238 }
1239 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
1240 }
1241 print("\n")
1242 if refBase != 0 {
1243 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
1244 gcDumpObject("object", refBase, refOff)
1245 }
1246 getg().m.traceback = 2
1247 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
1248 }
// findObject returns the base address of the heap object containing the
// address p, that object's span, and its index within the span.
//
// If p does not point into a heap object, findObject returns base == 0.
// If p points into the heap at an invalid location and
// debug.invalidptr != 0, findObject throws. refBase and refOff, if
// nonzero, identify the object and offset where p was found and are used
// only for error reporting.
1274 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
1275 s = spanOf(p)
1276
1277
1278 if s == nil {
1279 if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
1280
1281
1282
1283 badPointer(s, p, refBase, refOff)
1284 }
1285 return
1286 }
1287
1288
1289
1290
1291 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
1292
1293 if state == mSpanManual {
1294 return
1295 }
1296
1297
1298 if debug.invalidptr != 0 {
1299 badPointer(s, p, refBase, refOff)
1300 }
1301 return
1302 }
1303
1304 objIndex = s.objIndex(p)
1305 base = s.base() + objIndex*s.elemsize
1306 return
1307 }
1308
// reflect_verifyNotInHeapPtr reports whether p may be used as a
// not-in-heap pointer: it must not point into the heap and must not be
// the clobberdead sentinel.
1312 func reflect_verifyNotInHeapPtr(p uintptr) bool {
1313
1314
1315
1316 return spanOf(p) == nil && p != clobberdeadPtr
1317 }
1318
1319 const ptrBits = 8 * goarch.PtrSize
// bulkBarrierBitmap executes write barriers for the memory range
// [dst, dst+size) using the 1-bit-per-word pointer bitmap bits.
// maskOffset is dst's offset in bytes within the region the bitmap
// describes. It is used for global data and BSS, where no span or type
// information is available. As in bulkBarrierPreWrite, the old
// destination values (and the new source values when src != 0) are
// queued in the write barrier buffer.
1329 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
1330 word := maskOffset / goarch.PtrSize
1331 bits = addb(bits, word/8)
1332 mask := uint8(1) << (word % 8)
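// bits now points at the bitmap byte covering dst's first word, and mask
// selects that word's bit within the byte.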
1333
1334 buf := &getg().m.p.ptr().wbBuf
1335 for i := uintptr(0); i < size; i += goarch.PtrSize {
1336 if mask == 0 {
1337 bits = addb(bits, 1)
1338 if *bits == 0 {
// Skip 8 words at a time (the loop increment covers the 8th).
1340 i += 7 * goarch.PtrSize
1341 continue
1342 }
1343 mask = 1
1344 }
1345 if *bits&mask != 0 {
1346 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1347 if src == 0 {
1348 p := buf.get1()
1349 p[0] = *dstx
1350 } else {
1351 srcx := (*uintptr)(unsafe.Pointer(src + i))
1352 p := buf.get2()
1353 p[0] = *dstx
1354 p[1] = *srcx
1355 }
1356 }
1357 mask <<= 1
1358 }
1359 }
// typeBitsBulkBarrier executes a write barrier for every pointer slot of
// type typ in the memory range [dst, dst+size), queuing the old
// destination and new source pointer values in the write barrier buffer.
// It reads the type's 1-bit pointer mask directly, so it must not be
// used with types that have a GC program, and size must equal typ.Size_.
1378 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
1379 if typ == nil {
1380 throw("runtime: typeBitsBulkBarrier without type")
1381 }
1382 if typ.Size_ != size {
1383 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
1384 throw("runtime: invalid typeBitsBulkBarrier")
1385 }
1386 if typ.Kind_&abi.KindGCProg != 0 {
1387 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
1388 throw("runtime: invalid typeBitsBulkBarrier")
1389 }
1390 if !writeBarrier.enabled {
1391 return
1392 }
1393 ptrmask := typ.GCData
1394 buf := &getg().m.p.ptr().wbBuf
1395 var bits uint32
1396 for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
1397 if i&(goarch.PtrSize*8-1) == 0 {
1398 bits = uint32(*ptrmask)
1399 ptrmask = addb(ptrmask, 1)
1400 } else {
1401 bits = bits >> 1
1402 }
1403 if bits&1 != 0 {
1404 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1405 srcx := (*uintptr)(unsafe.Pointer(src + i))
1406 p := buf.get2()
1407 p[0] = *dstx
1408 p[1] = *srcx
1409 }
1410 }
1411 }
// countAlloc returns the number of objects allocated in span s by
// counting the set bits in its mark bitmap.
1415 func (s *mspan) countAlloc() int {
1416 count := 0
1417 bytes := divRoundUp(uintptr(s.nelems), 8)
1418
1419
1420
1421
1422 for i := uintptr(0); i < bytes; i += 8 {
// Count 64 bits at a time. Bits beyond s.nelems are always zero, so they
// never inflate the count.
1427 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
1428 count += sys.OnesCount64(mrkBits)
1429 }
1430 return count
1431 }
// readUintptr reads an aligned uintptr bitmap word from p, byte-swapping
// on big-endian machines so that bit 0 always describes the first word.
1435 func readUintptr(p *byte) uintptr {
1436 x := *(*uintptr)(unsafe.Pointer(p))
1437 if goarch.BigEndian {
1438 if goarch.PtrSize == 8 {
1439 return uintptr(sys.Bswap64(uint64(x)))
1440 }
1441 return uintptr(sys.Bswap32(uint32(x)))
1442 }
1443 return x
1444 }
1445
1446 var debugPtrmask struct {
1447 lock mutex
1448 data *byte
1449 }
// progToPointerMask returns the 1-bit pointer mask produced by running
// the GC program prog, which describes size bytes of memory.
1454 func progToPointerMask(prog *byte, size uintptr) bitvector {
1455 n := (size/goarch.PtrSize + 7) / 8
1456 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1457 x[len(x)-1] = 0xa1
1458 n = runGCProg(prog, &x[0])
1459 if x[len(x)-1] != 0xa1 {
1460 throw("progToPointerMask: overflow")
1461 }
1462 return bitvector{int32(n), &x[0]}
1463 }
// runGCProg runs the GC program prog, writing the resulting 1-bit pointer
// bitmap to dst, and returns the number of bits written.
//
// A GC program is a byte-coded run-length encoding of a bitmap. Each
// instruction begins with an opcode byte: if the high bit is clear, the
// low 7 bits give a count n of literal bitmap bits that follow (n == 0
// ends the program); if the high bit is set, the low 7 bits give n, the
// number of most recently emitted bits to repeat (n == 0 means n follows
// as a varint), and a varint repeat count c follows, appending those n
// bits another c times.
1480 func runGCProg(prog, dst *byte) uintptr {
1481 dstStart := dst
1482
1483
1484 var bits uintptr
1485 var nbits uintptr
1486
1487 p := prog
1488 Run:
1489 for {
// Flush accumulated full bytes of the bitmap to dst.
1492 for ; nbits >= 8; nbits -= 8 {
1493 *dst = uint8(bits)
1494 dst = add1(dst)
1495 bits >>= 8
1496 }
1497
1498
1499 inst := uintptr(*p)
1500 p = add1(p)
1501 n := inst & 0x7F
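// The low 7 bits of the opcode byte are a count; the high bit
// distinguishes a literal run (clear) from a repeat (set).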
1502 if inst&0x80 == 0 {
1503
1504 if n == 0 {
1505
1506 break Run
1507 }
1508 nbyte := n / 8
1509 for i := uintptr(0); i < nbyte; i++ {
1510 bits |= uintptr(*p) << nbits
1511 p = add1(p)
1512 *dst = uint8(bits)
1513 dst = add1(dst)
1514 bits >>= 8
1515 }
1516 if n %= 8; n > 0 {
1517 bits |= uintptr(*p) << nbits
1518 p = add1(p)
1519 nbits += n
1520 }
1521 continue Run
1522 }
1523
1524
1525 if n == 0 {
1526 for off := uint(0); ; off += 7 {
1527 x := uintptr(*p)
1528 p = add1(p)
1529 n |= (x & 0x7F) << off
1530 if x&0x80 == 0 {
1531 break
1532 }
1533 }
1534 }
1535
1536
1537 c := uintptr(0)
1538 for off := uint(0); ; off += 7 {
1539 x := uintptr(*p)
1540 p = add1(p)
1541 c |= (x & 0x7F) << off
1542 if x&0x80 == 0 {
1543 break
1544 }
1545 }
1546 c *= n
// Emit the repeat. If the pattern of n bits fits in a register,
// replicate it there and emit it in bulk; otherwise copy the most
// recently written bits a byte at a time from the output itself.
1555 src := dst
1556 const maxBits = goarch.PtrSize*8 - 7
1557 if n <= maxBits {
1558
1559 pattern := bits
1560 npattern := nbits
1561
1562
1563 src = subtract1(src)
1564 for npattern < n {
1565 pattern <<= 8
1566 pattern |= uintptr(*src)
1567 src = subtract1(src)
1568 npattern += 8
1569 }
1570
1571
1572
1573
1574
1575 if npattern > n {
1576 pattern >>= npattern - n
1577 npattern = n
1578 }
1579
1580
1581 if npattern == 1 {
1582
1583
1584
1585
1586
1587
1588 if pattern == 1 {
1589 pattern = 1<<maxBits - 1
1590 npattern = maxBits
1591 } else {
1592 npattern = c
1593 }
1594 } else {
1595 b := pattern
1596 nb := npattern
1597 if nb+nb <= maxBits {
1598
1599 for nb <= goarch.PtrSize*8 {
1600 b |= b << nb
1601 nb += nb
1602 }
1603
1604
1605 nb = maxBits / npattern * npattern
1606 b &= 1<<nb - 1
1607 pattern = b
1608 npattern = nb
1609 }
1610 }
1611
1612
1613
1614
1615 for ; c >= npattern; c -= npattern {
1616 bits |= pattern << nbits
1617 nbits += npattern
1618 for nbits >= 8 {
1619 *dst = uint8(bits)
1620 dst = add1(dst)
1621 bits >>= 8
1622 nbits -= 8
1623 }
1624 }
1625
1626
1627 if c > 0 {
1628 pattern &= 1<<c - 1
1629 bits |= pattern << nbits
1630 nbits += c
1631 }
1632 continue Run
1633 }
1634
1635
1636
1637
1638 off := n - nbits
1639
1640 src = subtractb(src, (off+7)/8)
1641 if frag := off & 7; frag != 0 {
1642 bits |= uintptr(*src) >> (8 - frag) << nbits
1643 src = add1(src)
1644 nbits += frag
1645 c -= frag
1646 }
1647
1648
1649 for i := c / 8; i > 0; i-- {
1650 bits |= uintptr(*src) << nbits
1651 src = add1(src)
1652 *dst = uint8(bits)
1653 dst = add1(dst)
1654 bits >>= 8
1655 }
1656
1657 if c %= 8; c > 0 {
1658 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1659 nbits += c
1660 }
1661 }
1662
1663
1664 totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1665 nbits += -nbits & 7
1666 for ; nbits > 0; nbits -= 8 {
1667 *dst = uint8(bits)
1668 dst = add1(dst)
1669 bits >>= 8
1670 }
1671 return totalBits
1672 }
1673
// materializeGCProg allocates a manually managed span to hold a 1-bit
// pointer bitmap covering ptrdata bytes and unrolls the GC program prog
// into it. The result must be freed with dematerializeGCProg.
1679 func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
// One bitmap bit is needed per pointer-sized word of ptrdata.
1681 bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
1682
1683 pages := divRoundUp(bitmapBytes, pageSize)
1684 s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
1685 runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
1686 return s
1687 }
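// dematerializeGCProg frees the scratch span allocated by
// materializeGCProg.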
1688 func dematerializeGCProg(s *mspan) {
1689 mheap_.freeManual(s, spanAllocPtrScalarBits)
1690 }
1691
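// dumpGCProg prints a human-readable disassembly of the GC program at p,
// for debugging.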
1692 func dumpGCProg(p *byte) {
1693 nptr := 0
1694 for {
1695 x := *p
1696 p = add1(p)
1697 if x == 0 {
1698 print("\t", nptr, " end\n")
1699 break
1700 }
1701 if x&0x80 == 0 {
1702 print("\t", nptr, " lit ", x, ":")
1703 n := int(x+7) / 8
1704 for i := 0; i < n; i++ {
1705 print(" ", hex(*p))
1706 p = add1(p)
1707 }
1708 print("\n")
1709 nptr += int(x)
1710 } else {
1711 nbit := int(x &^ 0x80)
1712 if nbit == 0 {
1713 for nb := uint(0); ; nb += 7 {
1714 x := *p
1715 p = add1(p)
1716 nbit |= int(x&0x7f) << nb
1717 if x&0x80 == 0 {
1718 break
1719 }
1720 }
1721 }
1722 count := 0
1723 for nb := uint(0); ; nb += 7 {
1724 x := *p
1725 p = add1(p)
1726 count |= int(x&0x7f) << nb
1727 if x&0x80 == 0 {
1728 break
1729 }
1730 }
1731 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1732 nptr += nbit * count
1733 }
1734 }
1735 }
1736
// reflect_gcbits returns the GC pointer bitmap for the value x, one byte
// (0 or 1) per pointer-sized word. It exists for the reflect package's
// tests.
1743 func reflect_gcbits(x any) []byte {
1744 return getgcmask(x)
1745 }
// getgcmask returns the GC pointer mask for the value that ep points to,
// one byte per pointer-sized word. It consults the data/BSS bitmaps for
// globals, the heap bitmap for heap objects, and the stack maps for
// values on the current stack. It is intended for testing.
1750 func getgcmask(ep any) (mask []byte) {
1751 e := *efaceOf(&ep)
1752 p := e.data
1753 t := e._type
1754
1755 var et *_type
1756 if t.Kind_&abi.KindMask != abi.Pointer {
1757 throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
1758 }
1759 et = (*ptrtype)(unsafe.Pointer(t)).Elem
// Is p in a module's data or BSS section?
1762 for _, datap := range activeModules() {
1763
1764 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1765 bitmap := datap.gcdatamask.bytedata
1766 n := et.Size_
1767 mask = make([]byte, n/goarch.PtrSize)
1768 for i := uintptr(0); i < n; i += goarch.PtrSize {
1769 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1770 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1771 }
1772 return
1773 }
1774
1775
1776 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1777 bitmap := datap.gcbssmask.bytedata
1778 n := et.Size_
1779 mask = make([]byte, n/goarch.PtrSize)
1780 for i := uintptr(0); i < n; i += goarch.PtrSize {
1781 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1782 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1783 }
1784 return
1785 }
1786 }
// Otherwise, is p a heap pointer?
1789 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1790 if s.spanclass.noscan() {
1791 return nil
1792 }
1793 limit := base + s.elemsize
// Start from the iterator's first address so that a malloc header, if
// any, is not exposed to the caller.
1798 tp := s.typePointersOfUnchecked(base)
1799 base = tp.addr
// Unroll the full pointer bitmap that the GC actually observes for this
// object.
1802 maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
1803 for {
1804 var addr uintptr
1805 if tp, addr = tp.next(limit); addr == 0 {
1806 break
1807 }
1808 maskFromHeap[(addr-base)/goarch.PtrSize] = 1
1809 }
1810
1811
1812
1813
1814 for i := limit; i < s.elemsize; i++ {
1815 if *(*byte)(unsafe.Pointer(i)) != 0 {
1816 throw("found non-zeroed tail of allocation")
1817 }
1818 }
1819
// Trim trailing non-pointer words from the mask.
1822 for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
1823 maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
1824 }
1825
1826 if et.Kind_&abi.KindGCProg == 0 {
1827
1828 maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
1829 tp = s.typePointersOfType(et, base)
1830 for {
1831 var addr uintptr
1832 if tp, addr = tp.next(limit); addr == 0 {
1833 break
1834 }
1835 maskFromType[(addr-base)/goarch.PtrSize] = 1
1836 }
// Compare the mask computed from the heap bitmap against the mask
// computed from the type. maskFromHeap has had trailing zeros trimmed,
// so only the shared prefix is compared.
1848 differs := false
1849 for i := range maskFromHeap {
1850 if maskFromHeap[i] != maskFromType[i] {
1851 differs = true
1852 break
1853 }
1854 }
1855
1856 if differs {
1857 print("runtime: heap mask=")
1858 for _, b := range maskFromHeap {
1859 print(b)
1860 }
1861 println()
1862 print("runtime: type mask=")
1863 for _, b := range maskFromType {
1864 print(b)
1865 }
1866 println()
1867 print("runtime: type=", toRType(et).string(), "\n")
1868 throw("found two different masks from two different methods")
1869 }
1870 }
1871
1872
1873 mask = maskFromHeap
1874
1875
1876
1877
1878 KeepAlive(ep)
1879 return
1880 }
// Is p on the current goroutine's stack? If so, compute the mask from
// the frame's stack map.
1883 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1884 found := false
1885 var u unwinder
1886 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1887 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1888 found = true
1889 break
1890 }
1891 }
1892 if found {
1893 locals, _, _ := u.frame.getStackMap(false)
1894 if locals.n == 0 {
1895 return
1896 }
1897 size := uintptr(locals.n) * goarch.PtrSize
1898 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1899 mask = make([]byte, n/goarch.PtrSize)
1900 for i := uintptr(0); i < n; i += goarch.PtrSize {
1901 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1902 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1903 }
1904 }
1905 return
1906 }
// Otherwise p is not something the GC knows about, so there is no mask
// to report.
1911 return
1912 }
1913