Source file
src/runtime/mbitmap.go
56 package runtime
57
58 import (
59 "internal/abi"
60 "internal/goarch"
61 "internal/runtime/atomic"
62 "internal/runtime/sys"
63 "unsafe"
64 )
65
66 const (
67 // mallocHeaderSize is the size of the *_type header placed at the
68 // start of heap objects whose pointer bitmap does not fit in the
69 // span's packed heap bits (see heapBitsInSpan).
71 mallocHeaderSize = 8
72 // minSizeForMallocHeader is the size threshold for using a malloc
73 // header: objects larger than this carry a *_type header, while
74 // objects of this size or smaller keep their pointer/scalar bitmap
75 // packed at the end of the span. One uintptr of bitmap (ptrBits bits)
76 // covers ptrBits words, i.e. goarch.PtrSize*ptrBits bytes.
101 minSizeForMallocHeader = goarch.PtrSize * ptrBits
102 )
103
104 // heapBitsInSpan reports whether objects of size userSize store their
105 // pointer/scalar metadata packed at the end of their span ("heap
106 // bits") rather than behind a malloc header placed in front of each
107 // object.
112 func heapBitsInSpan(userSize uintptr) bool {
113
114
115 return userSize <= minSizeForMallocHeader
116 }
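
Aside: on a 64-bit platform this threshold works out to 8 * 64 = 512 bytes, i.e. an object small enough that a single bitmap word can describe all of its words. A standalone sketch of the same arithmetic (the constants mirror the ones above but are redeclared here, so nothing from the runtime is assumed):

package main

import "fmt"

func main() {
	const ptrSize = 8                                // assuming a 64-bit platform
	const ptrBits = 8 * ptrSize                      // 64 bits per bitmap word
	const minSizeForMallocHeader = ptrSize * ptrBits // 512 bytes

	for _, size := range []uintptr{16, 512, 513, 4096} {
		fmt.Printf("size %4d: heap bits kept in span = %v\n", size, size <= minSizeForMallocHeader)
	}
}
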
117
118 // typePointers is an iterator over the pointers of one or more heap
119 // objects. Callers obtain one from the typePointersOf* methods and
120 // advance it with next or nextFast until next reports a zero address.
122 type typePointers struct {
123 // elem is the base address of the current "element", that is, the
124 // current instance of tp.typ being walked. When the span's packed heap
125 // bits are used (typ == nil) it is simply the object's base address.
126 elem uintptr
127 // addr is the address of the word that bit 0 of mask describes; the
128 // mask covers the ptrBits words starting here.
130 addr uintptr
131 // mask is a bitmap with one bit per word, starting at addr: a set bit
132 // means the corresponding word holds a pointer that has not yet been
133 // returned by the iterator.
136 mask uintptr
137 // typ is the type whose GCData bitmap backs the iteration, or nil if
138 // the span's packed heap bits are used instead (small objects).
140 typ *_type
141 }
142
143 // typePointersOf returns an iterator over the heap pointers in the
144 // memory range [addr, addr+size), which must lie within one object of
145 // this span. If the range is exactly one object, the unchecked
146 // iterator is returned directly; otherwise the iterator is
147 // fast-forwarded to addr and clipped at addr+size.
154 func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
155 base := span.objBase(addr)
156 tp := span.typePointersOfUnchecked(base)
157 if base == addr && size == span.elemsize {
158 return tp
159 }
160 return tp.fastForward(addr-tp.addr, addr+size)
161 }
162
163 // typePointersOfUnchecked is like typePointersOf, but assumes addr is
164 // the base address of an object in this span and iterates over the
165 // entire object, skipping the base/size checks and the fast-forward.
166 // For objects with a malloc header it also steps over the header.
171 func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
172 const doubleCheck = false
173 if doubleCheck && span.objBase(addr) != addr {
174 print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
175 throw("typePointersOfUnchecked consisting of non-base-address for object")
176 }
177
178 spc := span.spanclass
179 if spc.noscan() {
180 return typePointers{}
181 }
182 if heapBitsInSpan(span.elemsize) {
183
184 return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
185 }
186
187
188 var typ *_type
189 if spc.sizeclass() != 0 {
190
191 typ = *(**_type)(unsafe.Pointer(addr))
192 addr += mallocHeaderSize
193 } else {
194 typ = span.largeType
195 if typ == nil {
196
197 return typePointers{}
198 }
199 }
200 gcdata := typ.GCData
201 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
202 }
203
204 // typePointersOfType returns an iterator over a value of type typ
205 // located at addr. typ must be non-nil and must not use a GC program.
206 // It lets callers that already know the value's type (such as the bulk
207 // write barriers) avoid reading the malloc header or span heap bits.
214 func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
215 const doubleCheck = false
216 if doubleCheck && (typ == nil || typ.Kind_&abi.KindGCProg != 0) {
217 throw("bad type passed to typePointersOfType")
218 }
219 if span.spanclass.noscan() {
220 return typePointers{}
221 }
222
223 gcdata := typ.GCData
224 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
225 }
226
227 // nextFast is the fast path of next: it pulls the next pointer address
228 // out of the current mask word without ever consulting the type's
229 // bitmap. If the mask is empty it returns an address of 0, and the
230 // caller must fall back to next (or stop).
231 //
232 // The returned iterator has the consumed bit cleared from its mask.
247 func (tp typePointers) nextFast() (typePointers, uintptr) {
248
249 if tp.mask == 0 {
250 return tp, 0
251 }
252
253 var i int
254 if goarch.PtrSize == 8 {
255 i = sys.TrailingZeros64(uint64(tp.mask))
256 } else {
257 i = sys.TrailingZeros32(uint32(tp.mask))
258 }
259
260 tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
261
262 return tp, tp.addr + uintptr(i)*goarch.PtrSize
263 }
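
The core trick above is standard bit iteration: find the lowest set bit with a trailing-zeros count, convert the bit position into a word offset, and clear the bit. A self-contained sketch using math/bits (the address and the mask are made-up values):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const ptrSize = 8       // assuming a 64-bit platform
	addr := uintptr(0x1000) // hypothetical start of the mask's window
	mask := uint64(0b10110) // words 1, 2 and 4 hold pointers

	for mask != 0 {
		i := bits.TrailingZeros64(mask)
		mask ^= 1 << i // clear the bit we just consumed, as nextFast does
		fmt.Printf("pointer slot at %#x\n", addr+uintptr(i)*ptrSize)
	}
}
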
264
265 // next advances the iterator and returns the address of the next
266 // pointer, or 0 when no pointers remain below limit. limit is the
267 // exclusive upper bound of the walk and should be the same value on
268 // every call for a given iterator.
273 func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
274 for {
275 if tp.mask != 0 {
276 return tp.nextFast()
277 }
278
279
280 if tp.typ == nil {
281 return typePointers{}, 0
282 }
283
284
285 if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
286 tp.elem += tp.typ.Size_
287 tp.addr = tp.elem
288 } else {
289 tp.addr += ptrBits * goarch.PtrSize
290 }
291
292
293 if tp.addr >= limit {
294 return typePointers{}, 0
295 }
296
297
298 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
299 if tp.addr+goarch.PtrSize*ptrBits > limit {
300 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
301 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
302 }
303 }
304 }
305
306 // fastForward skips the iterator ahead by n bytes and clips it so that
307 // it yields no addresses at or beyond limit. It is equivalent to
308 // calling next repeatedly and discarding every address below
309 // tp.addr+n.
313 func (tp typePointers) fastForward(n, limit uintptr) typePointers {
314
315 target := tp.addr + n
316 if target >= limit {
317 return typePointers{}
318 }
319 if tp.typ == nil {
320
321
322 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
323
324 if tp.addr+goarch.PtrSize*ptrBits > limit {
325 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
326 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
327 }
328 return tp
329 }
330
331
332
333 if n >= tp.typ.Size_ {
334
335
336 oldelem := tp.elem
337 tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
338 tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
339 } else {
340 tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
341 }
342
343 if tp.addr-tp.elem >= tp.typ.PtrBytes {
344
345
346 tp.elem += tp.typ.Size_
347 tp.addr = tp.elem
348 tp.mask = readUintptr(tp.typ.GCData)
349
350
351 if tp.addr >= limit {
352 return typePointers{}
353 }
354 } else {
355
356
357 tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
358 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
359 }
360 if tp.addr+goarch.PtrSize*ptrBits > limit {
361 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
362 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
363 }
364 return tp
365 }
366
367 // objBase returns the base address of the object containing addr,
368 // where addr must point into an allocated object in this span.
372 func (span *mspan) objBase(addr uintptr) uintptr {
373 return span.base() + span.objIndex(addr)*span.elemsize
374 }
375
376 // bulkBarrierPreWrite executes a write barrier for every pointer slot
377 // in the memory range [dst, dst+size), using either the pointer/scalar
378 // information of the span containing dst or, when typ is non-nil and
379 // has no GC program, typ's own bitmap.
380 //
381 // For each pointer slot it records the old value at dst and, if src is
382 // non-zero, the new value about to be copied from [src, src+size), in
383 // the per-P write barrier buffer. The caller performs the actual copy
384 // afterwards; typedmemmove and typedslicecopy are typical callers.
385 //
386 // dst, src and size must be pointer-aligned, and the destination must
387 // be a single allocation or lie in the data/bss sections (globals).
418 func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
419 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
420 throw("bulkBarrierPreWrite: unaligned arguments")
421 }
422 if !writeBarrier.enabled {
423 return
424 }
425 s := spanOf(dst)
426 if s == nil {
427
428
429 for _, datap := range activeModules() {
430 if datap.data <= dst && dst < datap.edata {
431 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
432 return
433 }
434 }
435 for _, datap := range activeModules() {
436 if datap.bss <= dst && dst < datap.ebss {
437 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
438 return
439 }
440 }
441 return
442 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
443
444
445
446
447
448
449 return
450 }
451 buf := &getg().m.p.ptr().wbBuf
452
453
454 const doubleCheck = false
455 if doubleCheck {
456 doubleCheckTypePointersOfType(s, typ, dst, size)
457 }
458
459 var tp typePointers
460 if typ != nil && typ.Kind_&abi.KindGCProg == 0 {
461 tp = s.typePointersOfType(typ, dst)
462 } else {
463 tp = s.typePointersOf(dst, size)
464 }
465 if src == 0 {
466 for {
467 var addr uintptr
468 if tp, addr = tp.next(dst + size); addr == 0 {
469 break
470 }
471 dstx := (*uintptr)(unsafe.Pointer(addr))
472 p := buf.get1()
473 p[0] = *dstx
474 }
475 } else {
476 for {
477 var addr uintptr
478 if tp, addr = tp.next(dst + size); addr == 0 {
479 break
480 }
481 dstx := (*uintptr)(unsafe.Pointer(addr))
482 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
483 p := buf.get2()
484 p[0] = *dstx
485 p[1] = *srcx
486 }
487 }
488 }
489
490 // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite, but it only
491 // records the pointer values read from the source range [src, src+size)
492 // and never reads the destination. It is used when dst is known to hold
493 // no live pointers yet, e.g. freshly zeroed memory about to be
494 // initialized.
504 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
505 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
506 throw("bulkBarrierPreWrite: unaligned arguments")
507 }
508 if !writeBarrier.enabled {
509 return
510 }
511 buf := &getg().m.p.ptr().wbBuf
512 s := spanOf(dst)
513
514
515 const doubleCheck = false
516 if doubleCheck {
517 doubleCheckTypePointersOfType(s, typ, dst, size)
518 }
519
520 var tp typePointers
521 if typ != nil && typ.Kind_&abi.KindGCProg == 0 {
522 tp = s.typePointersOfType(typ, dst)
523 } else {
524 tp = s.typePointersOf(dst, size)
525 }
526 for {
527 var addr uintptr
528 if tp, addr = tp.next(dst + size); addr == 0 {
529 break
530 }
531 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
532 p := buf.get1()
533 p[0] = *srcx
534 }
535 }
536
537 // initHeapBits initializes the span's packed heap bitmap, if it has one.
538 func (s *mspan) initHeapBits() {
539 if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
540 b := s.heapBits()
541 for i := range b {
542 b[i] = ^uintptr(0)
543 }
544 } else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
545 b := s.heapBits()
546 clear(b)
547 }
548 }
549
550 // heapBits returns the span's packed pointer bitmap as a slice of
551 // uintptr words. The bitmap is stored in the last bytes of the span
552 // and holds one bit per word of the span.
553 //
554 // It is only meaningful for spans whose objects keep their metadata in
555 // the span (see heapBitsInSpan) and for user arena chunks.
564 func (span *mspan) heapBits() []uintptr {
565 const doubleCheck = false
566
567 if doubleCheck && !span.isUserArenaChunk {
568 if span.spanclass.noscan() {
569 throw("heapBits called for noscan")
570 }
571 if span.elemsize > minSizeForMallocHeader {
572 throw("heapBits called for span class that should have a malloc header")
573 }
574 }
575
576
577
578 if span.npages == 1 {
579
580 return heapBitsSlice(span.base(), pageSize)
581 }
582 return heapBitsSlice(span.base(), span.npages*pageSize)
583 }
584
585 // heapBitsSlice computes where the packed heap bitmap lives for a span
586 // with the given base and size, and returns it as a []uintptr.
588 func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
589 bitmapSize := spanSize / goarch.PtrSize / 8
590 elems := int(bitmapSize / goarch.PtrSize)
591 var sl notInHeapSlice
592 sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
593 return *(*[]uintptr)(unsafe.Pointer(&sl))
594 }
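
For a sense of scale: an 8 KiB span needs 8192/8 = 1024 bits of metadata, i.e. 128 bytes, carved out of the end of the span itself. A small sketch of the same arithmetic (the base address is arbitrary):

package main

import "fmt"

func main() {
	const ptrSize = 8            // assuming a 64-bit platform
	spanBase := uintptr(0x10000) // arbitrary example base
	spanSize := uintptr(8192)    // one 8 KiB page

	bitmapSize := spanSize / ptrSize / 8 // one bit per word -> 128 bytes
	fmt.Printf("bitmap lives at [%#x, %#x)\n", spanBase+spanSize-bitmapSize, spanBase+spanSize)
	fmt.Printf("%d words covered by %d bitmap bits\n", spanSize/ptrSize, bitmapSize*8)
}
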
595
596 // heapBitsSmallForAddr reads the pointer bitmap for the small object
597 // starting at addr out of the bitmap stored at the end of its span.
598 // The result has one bit per word of the object.
602 func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
603 spanSize := span.npages * pageSize
604 bitmapSize := spanSize / goarch.PtrSize / 8
605 hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
606
607
608
609
610
611
612
613
614
615 i := (addr - span.base()) / goarch.PtrSize / ptrBits
616 j := (addr - span.base()) / goarch.PtrSize % ptrBits
617 bits := span.elemsize / goarch.PtrSize
618 word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
619 word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
620
621 var read uintptr
622 if j+bits > ptrBits {
623
624 bits0 := ptrBits - j
625 bits1 := bits - bits0
626 read = *word0 >> j
627 read |= (*word1 & ((1 << bits1) - 1)) << bits0
628 } else {
629
630 read = (*word0 >> j) & ((1 << bits) - 1)
631 }
632 return read
633 }
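
The only fiddly case above is an object whose bits straddle two bitmap words. A toy reproduction of that read, with made-up values: a 4-word object whose mask starts at bit 62 of word0.

package main

import "fmt"

func main() {
	const ptrBits = 64
	word0 := uint64(0b11) << 62 // last two bits of the first bitmap word
	word1 := uint64(0b01)       // continuation in the next bitmap word
	j := uint64(62)             // bit offset of the object within word0
	objBits := uint64(4)        // object is 4 words long

	bits0 := ptrBits - j     // bits taken from word0
	bits1 := objBits - bits0 // bits taken from word1
	read := word0 >> j
	read |= (word1 & ((1 << bits1) - 1)) << bits0
	fmt.Printf("object mask = %04b\n", read) // 0111: words 0, 1 and 2 are pointers
}
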
634
635 // writeHeapBitsSmall writes the span-end heap bits for a small object
636 // of type typ allocated at x, where dataSize is the portion of the
637 // slot actually used for data. It returns the object's scan size: the
638 // number of bytes the GC must examine when scanning it.
642 func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
643
644 src0 := readUintptr(typ.GCData)
645
646
647 scanSize = typ.PtrBytes
648 src := src0
649 if typ.Size_ == goarch.PtrSize {
650 src = (1 << (dataSize / goarch.PtrSize)) - 1
651 } else {
652
653
654
655 if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
656 throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
657 }
658 for i := typ.Size_; i < dataSize; i += typ.Size_ {
659 src |= src0 << (i / goarch.PtrSize)
660 scanSize += typ.Size_
661 }
662 if asanenabled {
663
664
665 src &= (1 << (dataSize / goarch.PtrSize)) - 1
666 }
667 }
668
669
670
671 dst := unsafe.Pointer(span.base() + pageSize - pageSize/goarch.PtrSize/8)
672 o := (x - span.base()) / goarch.PtrSize
673 i := o / ptrBits
674 j := o % ptrBits
675 bits := span.elemsize / goarch.PtrSize
676 if j+bits > ptrBits {
677
678 bits0 := ptrBits - j
679 bits1 := bits - bits0
680 dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
681 dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
682 *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
683 *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
684 } else {
685
686 dst := (*uintptr)(add(dst, i*goarch.PtrSize))
687 *dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j)
688 }
689
690 const doubleCheck = false
691 if doubleCheck {
692 srcRead := span.heapBitsSmallForAddr(x)
693 if srcRead != src {
694 print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
695 print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
696 print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
697 throw("bad pointer bits written for small object")
698 }
699 }
700 return
701 }
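
The replication step above (src |= src0 << (i / goarch.PtrSize)) is what turns a single element's pointer mask into the mask for a whole small array. A sketch with invented sizes: a 3-word element whose first word is a pointer, repeated three times in a 72-byte allocation.

package main

import "fmt"

func main() {
	const ptrSize = 8
	typeSize := uintptr(24) // 3 words per element (hypothetical type)
	dataSize := uintptr(72) // 3 elements in the allocation
	src0 := uintptr(0b001)  // only the element's first word is a pointer

	src := src0
	for i := typeSize; i < dataSize; i += typeSize {
		src |= src0 << (i / ptrSize)
	}
	fmt.Printf("replicated mask = %09b\n", src) // 001001001
}
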
702
703 // The heapSetType* functions record a freshly allocated object's type
704 // information, so the GC can later locate its pointers. Which one is
705 // used depends on how the object stores that metadata:
706 //
707 //   - heapSetTypeNoHeader: small objects whose bitmap is packed in the span.
708 //   - heapSetTypeSmallHeader: objects carrying a malloc header.
709 //   - heapSetTypeLarge: large objects, whose type hangs off the span itself.
710 //
711 // Each returns the object's scan size. doubleCheckHeapSetType enables
712 // expensive verification of the metadata that was just written.
720 const doubleCheckHeapSetType = doubleCheckMalloc
721
722 func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
723 if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
724 throw("tried to write heap bits, but no heap bits in span")
725 }
726 scanSize := span.writeHeapBitsSmall(x, dataSize, typ)
727 if doubleCheckHeapSetType {
728 doubleCheckHeapType(x, dataSize, typ, nil, span)
729 }
730 return scanSize
731 }
732
733 func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr {
734 *header = typ
735 if doubleCheckHeapSetType {
736 doubleCheckHeapType(x, dataSize, typ, header, span)
737 }
738 return span.elemsize
739 }
740
741 func heapSetTypeLarge(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
742 gctyp := typ
743 if typ.Kind_&abi.KindGCProg != 0 {
744
745
746
747 if span.spanclass.sizeclass() != 0 {
748 throw("GCProg for type that isn't large")
749 }
750 spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
751 heapBitsOff := spaceNeeded
752 spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
753 npages := alignUp(spaceNeeded, pageSize) / pageSize
754 var progSpan *mspan
755 systemstack(func() {
756 progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits)
757 memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
758 })
759
760
761
762
763 gctyp = (*_type)(unsafe.Pointer(progSpan.base()))
764 gctyp.Size_ = typ.Size_
765 gctyp.PtrBytes = typ.PtrBytes
766 gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff))
767 gctyp.TFlag = abi.TFlagUnrolledBitmap
768
769
770 runGCProg(addb(typ.GCData, 4), gctyp.GCData)
771 }
772
773 span.largeType = gctyp
774 if doubleCheckHeapSetType {
775 doubleCheckHeapType(x, dataSize, typ, &span.largeType, span)
776 }
777 return span.elemsize
778 }
779
780 func doubleCheckHeapType(x, dataSize uintptr, gctyp *_type, header **_type, span *mspan) {
781 doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
782
783
784
785
786 maxIterBytes := span.elemsize
787 if header == nil {
788 maxIterBytes = dataSize
789 }
790 off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
791 size := dataSize - off
792 if size == 0 {
793 off -= goarch.PtrSize
794 size += goarch.PtrSize
795 }
796 interior := x + off
797 size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
798 if size == 0 {
799 size = goarch.PtrSize
800 }
801
802 size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
803 if interior+size > x+maxIterBytes {
804 size = x + maxIterBytes - interior
805 }
806 doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
807 }
808
809 func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
810
811 tp := span.typePointersOfUnchecked(span.objBase(x))
812 maxIterBytes := span.elemsize
813 if header == nil {
814 maxIterBytes = dataSize
815 }
816 bad := false
817 for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
818
819 want := false
820 if i < span.elemsize {
821 off := i % typ.Size_
822 if off < typ.PtrBytes {
823 j := off / goarch.PtrSize
824 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
825 }
826 }
827 if want {
828 var addr uintptr
829 tp, addr = tp.next(x + span.elemsize)
830 if addr == 0 {
831 println("runtime: found bad iterator")
832 }
833 if addr != x+i {
834 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
835 bad = true
836 }
837 }
838 }
839 if !bad {
840 var addr uintptr
841 tp, addr = tp.next(x + span.elemsize)
842 if addr == 0 {
843 return
844 }
845 println("runtime: extra pointer:", hex(addr))
846 }
847 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&abi.KindGCProg != 0, "\n")
848 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
849 print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
850 print("runtime: limit=", hex(x+span.elemsize), "\n")
851 tp = span.typePointersOfUnchecked(x)
852 dumpTypePointers(tp)
853 for {
854 var addr uintptr
855 if tp, addr = tp.next(x + span.elemsize); addr == 0 {
856 println("runtime: would've stopped here")
857 dumpTypePointers(tp)
858 break
859 }
860 print("runtime: addr=", hex(addr), "\n")
861 dumpTypePointers(tp)
862 }
863 throw("heapSetType: pointer entry not correct")
864 }
865
866 func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
867 bad := false
868 if interior < x {
869 print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
870 throw("found bad interior pointer")
871 }
872 off := interior - x
873 tp := span.typePointersOf(interior, size)
874 for i := off; i < off+size; i += goarch.PtrSize {
875
876 want := false
877 if i < span.elemsize {
878 off := i % typ.Size_
879 if off < typ.PtrBytes {
880 j := off / goarch.PtrSize
881 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
882 }
883 }
884 if want {
885 var addr uintptr
886 tp, addr = tp.next(interior + size)
887 if addr == 0 {
888 println("runtime: found bad iterator")
889 bad = true
890 }
891 if addr != x+i {
892 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
893 bad = true
894 }
895 }
896 }
897 if !bad {
898 var addr uintptr
899 tp, addr = tp.next(interior + size)
900 if addr == 0 {
901 return
902 }
903 println("runtime: extra pointer:", hex(addr))
904 }
905 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
906 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
907 print("runtime: limit=", hex(interior+size), "\n")
908 tp = span.typePointersOf(interior, size)
909 dumpTypePointers(tp)
910 for {
911 var addr uintptr
912 if tp, addr = tp.next(interior + size); addr == 0 {
913 println("runtime: would've stopped here")
914 dumpTypePointers(tp)
915 break
916 }
917 print("runtime: addr=", hex(addr), "\n")
918 dumpTypePointers(tp)
919 }
920
921 print("runtime: want: ")
922 for i := off; i < off+size; i += goarch.PtrSize {
923
924 want := false
925 if i < dataSize {
926 off := i % typ.Size_
927 if off < typ.PtrBytes {
928 j := off / goarch.PtrSize
929 want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
930 }
931 }
932 if want {
933 print("1")
934 } else {
935 print("0")
936 }
937 }
938 println()
939
940 throw("heapSetType: pointer entry not correct")
941 }
942
943 // doubleCheckTypePointersOfType checks that typePointersOfType and typePointersOf agree on the pointers in [addr, addr+size).
944 func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
945 if typ == nil || typ.Kind_&abi.KindGCProg != 0 {
946 return
947 }
948 if typ.Kind_&abi.KindMask == abi.Interface {
949
950
951
952 return
953 }
954 tp0 := s.typePointersOfType(typ, addr)
955 tp1 := s.typePointersOf(addr, size)
956 failed := false
957 for {
958 var addr0, addr1 uintptr
959 tp0, addr0 = tp0.next(addr + size)
960 tp1, addr1 = tp1.next(addr + size)
961 if addr0 != addr1 {
962 failed = true
963 break
964 }
965 if addr0 == 0 {
966 break
967 }
968 }
969 if failed {
970 tp0 := s.typePointersOfType(typ, addr)
971 tp1 := s.typePointersOf(addr, size)
972 print("runtime: addr=", hex(addr), " size=", size, "\n")
973 print("runtime: type=", toRType(typ).string(), "\n")
974 dumpTypePointers(tp0)
975 dumpTypePointers(tp1)
976 for {
977 var addr0, addr1 uintptr
978 tp0, addr0 = tp0.next(addr + size)
979 tp1, addr1 = tp1.next(addr + size)
980 print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
981 if addr0 == 0 && addr1 == 0 {
982 break
983 }
984 }
985 throw("mismatch between typePointersOfType and typePointersOf")
986 }
987 }
988
989 func dumpTypePointers(tp typePointers) {
990 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
991 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
992 for i := uintptr(0); i < ptrBits; i++ {
993 if tp.mask&(uintptr(1)<<i) != 0 {
994 print("1")
995 } else {
996 print("0")
997 }
998 }
999 println()
1000 }
1001
1002 // addb returns the byte pointer p+n.
1006 func addb(p *byte, n uintptr) *byte {
1007
1008
1009
1010 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
1011 }
1012
1013 // subtractb returns the byte pointer p-n.
1017 func subtractb(p *byte, n uintptr) *byte {
1018
1019
1020
1021 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
1022 }
1023
1024 // add1 returns the byte pointer p+1.
1028 func add1(p *byte) *byte {
1029
1030
1031
1032 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
1033 }
1034
1035 // subtract1 returns the byte pointer p-1.
1041 func subtract1(p *byte) *byte {
1042
1043
1044
1045 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
1046 }
1047
1048 // markBits addresses a single mark or allocation bit of an object in a
1049 // span: bytep points at the byte containing the bit, mask selects the
1050 // bit within that byte, and index is the object's index in the span.
1057 type markBits struct {
1058 bytep *uint8
1059 mask uint8
1060 index uintptr
1061 }
1062
1063 // allocBitsForIndex returns a markBits addressing the allocation bit for the object at allocBitIndex.
1064 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
1065 bytep, mask := s.allocBits.bitp(allocBitIndex)
1066 return markBits{bytep, mask, allocBitIndex}
1067 }
1068
1069 // refillAllocCache refills s.allocCache with the 64 allocation bits
1070 // starting at byte whichByte of s.allocBits, inverted so that set bits
1071 // mark free object slots.
1073 func (s *mspan) refillAllocCache(whichByte uint16) {
1074 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
1075 aCache := uint64(0)
1076 aCache |= uint64(bytes[0])
1077 aCache |= uint64(bytes[1]) << (1 * 8)
1078 aCache |= uint64(bytes[2]) << (2 * 8)
1079 aCache |= uint64(bytes[3]) << (3 * 8)
1080 aCache |= uint64(bytes[4]) << (4 * 8)
1081 aCache |= uint64(bytes[5]) << (5 * 8)
1082 aCache |= uint64(bytes[6]) << (6 * 8)
1083 aCache |= uint64(bytes[7]) << (7 * 8)
1084 s.allocCache = ^aCache
1085 }
1086
1087 // nextFreeIndex returns the index of the next free object in the span,
1088 // or s.nelems if none remain, advancing s.freeindex and refilling the
1089 // alloc cache as needed.
1091 func (s *mspan) nextFreeIndex() uint16 {
1092 sfreeindex := s.freeindex
1093 snelems := s.nelems
1094 if sfreeindex == snelems {
1095 return sfreeindex
1096 }
1097 if sfreeindex > snelems {
1098 throw("s.freeindex > s.nelems")
1099 }
1100
1101 aCache := s.allocCache
1102
1103 bitIndex := sys.TrailingZeros64(aCache)
1104 for bitIndex == 64 {
1105
1106 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
1107 if sfreeindex >= snelems {
1108 s.freeindex = snelems
1109 return snelems
1110 }
1111 whichByte := sfreeindex / 8
1112
1113 s.refillAllocCache(whichByte)
1114 aCache = s.allocCache
1115 bitIndex = sys.TrailingZeros64(aCache)
1116
1117
1118 }
1119 result := sfreeindex + uint16(bitIndex)
1120 if result >= snelems {
1121 s.freeindex = snelems
1122 return snelems
1123 }
1124
1125 s.allocCache >>= uint(bitIndex + 1)
1126 sfreeindex = result + 1
1127
1128 if sfreeindex%64 == 0 && sfreeindex != snelems {
1129
1130
1131
1132
1133
1134 whichByte := sfreeindex / 8
1135 s.refillAllocCache(whichByte)
1136 }
1137 s.freeindex = sfreeindex
1138 return result
1139 }
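
The allocCache stores the complement of allocBits, so a single trailing-zeros instruction yields the next free slot. A minimal illustration with a made-up allocation bitmap:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var allocBits uint64 = 0b0010_1111 // objects 0-3 and 5 are allocated
	cache := ^allocBits                // invert: set bits now mark free slots

	fmt.Println("next free object index:", bits.TrailingZeros64(cache)) // 4
}
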
1140
1141 // isFree reports whether the object at index is unallocated. Objects
1142 // below freeIndexForScan are always reported as allocated.
1146 func (s *mspan) isFree(index uintptr) bool {
1147 if index < uintptr(s.freeIndexForScan) {
1148 return false
1149 }
1150 bytep, mask := s.allocBits.bitp(index)
1151 return *bytep&mask == 0
1152 }
1153
1154 // divideByElemSize returns n/s.elemsize, computed with a multiply by
1155 // the precomputed reciprocal s.divMul and a 32-bit shift instead of a
1156 // hardware division.
1162 func (s *mspan) divideByElemSize(n uintptr) uintptr {
1163 const doubleCheck = false
1164
1165
1166 q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
1167
1168 if doubleCheck && q != n/s.elemsize {
1169 println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
1170 throw("bad magic division")
1171 }
1172 return q
1173 }
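
divMul is a precomputed fixed-point reciprocal, so the hot path avoids a hardware divide. The sketch below derives a reciprocal the way I believe the size-class generator does (treat that derivation as an assumption) and checks it against ordinary division:

package main

import "fmt"

func main() {
	elemsize := uint64(48)
	divMul := uint64(^uint32(0))/elemsize + 1 // assumed construction of the reciprocal

	for _, n := range []uint64{0, 47, 48, 96, 4000, 8000} {
		q := (n * divMul) >> 32
		fmt.Printf("%5d / %d = %d (expected %d)\n", n, elemsize, q, n/elemsize)
	}
}
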
1174
1175 // objIndex returns the index within the span of the object containing
1176 // the address p.
1178 func (s *mspan) objIndex(p uintptr) uintptr {
1179 return s.divideByElemSize(p - s.base())
1180 }
1181
1182 func markBitsForAddr(p uintptr) markBits {
1183 s := spanOf(p)
1184 objIndex := s.objIndex(p)
1185 return s.markBitsForIndex(objIndex)
1186 }
1187
1188 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
1189 bytep, mask := s.gcmarkBits.bitp(objIndex)
1190 return markBits{bytep, mask, objIndex}
1191 }
1192
1193 func (s *mspan) markBitsForBase() markBits {
1194 return markBits{&s.gcmarkBits.x, uint8(1), 0}
1195 }
1196
1197
1198 func (m markBits) isMarked() bool {
1199 return *m.bytep&m.mask != 0
1200 }
1201
1202
1203 func (m markBits) setMarked() {
1204
1205
1206
1207 atomic.Or8(m.bytep, m.mask)
1208 }
1209
1210
1211 func (m markBits) setMarkedNonAtomic() {
1212 *m.bytep |= m.mask
1213 }
1214
1215
1216 func (m markBits) clearMarked() {
1217
1218
1219
1220 atomic.And8(m.bytep, ^m.mask)
1221 }
1222
1223
1224 func markBitsForSpan(base uintptr) (mbits markBits) {
1225 mbits = markBitsForAddr(base)
1226 if mbits.mask != 1 {
1227 throw("markBitsForSpan: unaligned start")
1228 }
1229 return mbits
1230 }
1231
1232
1233 func (m *markBits) advance() {
1234 if m.mask == 1<<7 {
1235 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
1236 m.mask = 1
1237 } else {
1238 m.mask = m.mask << 1
1239 }
1240 m.index++
1241 }
1242 // clobberdeadPtr is the sentinel value the compiler's clobberdead debug
1243 // mode writes over dead memory; pointers with this value are diagnosed
1244 // explicitly rather than treated as stray heap pointers.
1245 const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
1246
1247 // badPointer reports a fatal error for a pointer into an unallocated or unused part of the heap.
1248 func badPointer(s *mspan, p, refBase, refOff uintptr) {
1249
1250
1251
1252
1253
1254
1255
1256
1257 printlock()
1258 print("runtime: pointer ", hex(p))
1259 if s != nil {
1260 state := s.state.get()
1261 if state != mSpanInUse {
1262 print(" to unallocated span")
1263 } else {
1264 print(" to unused region of span")
1265 }
1266 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
1267 }
1268 print("\n")
1269 if refBase != 0 {
1270 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
1271 gcDumpObject("object", refBase, refOff)
1272 }
1273 getg().m.traceback = 2
1274 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
1275 }
1276
1277 // findObject returns the base address of the heap object containing
1278 // the address p, the object's span, and the object's index within that
1279 // span. If p does not point into a heap object, it returns base == 0.
1280 //
1281 // If p points at an invalid heap address and debug.invalidptr != 0,
1282 // findObject throws instead of returning.
1283 //
1284 // refBase and refOff, if non-zero, describe where the pointer p was
1285 // found (an object base and byte offset) and are used only for error
1286 // reporting.
1301 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
1302 s = spanOf(p)
1303
1304
1305 if s == nil {
1306 if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
1307
1308
1309
1310 badPointer(s, p, refBase, refOff)
1311 }
1312 return
1313 }
1314
1315
1316
1317
1318 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
1319
1320 if state == mSpanManual {
1321 return
1322 }
1323
1324
1325 if debug.invalidptr != 0 {
1326 badPointer(s, p, refBase, refOff)
1327 }
1328 return
1329 }
1330
1331 objIndex = s.objIndex(p)
1332 base = s.base() + objIndex*s.elemsize
1333 return
1334 }
1335
1336 // reflect_verifyNotInHeapPtr reports whether p is a legal value for a
1337 // not-in-heap pointer: it must not point into the Go heap. It is called
1338 // from package reflect via linkname.
1339 func reflect_verifyNotInHeapPtr(p uintptr) bool {
1340
1341
1342
1343 return spanOf(p) == nil && p != clobberdeadPtr
1344 }
1345
1346 const ptrBits = 8 * goarch.PtrSize
1347
1348 // bulkBarrierBitmap executes write barriers for the copy of [src,
1349 // src+size) to [dst, dst+size) using a 1-bit-per-word pointer bitmap
1350 // (bits). maskOffset is dst's offset from the start of the region the
1351 // bitmap describes. It is used for the data and bss sections, whose
1352 // pointer information is a bitmap rather than a type.
1356 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
1357 word := maskOffset / goarch.PtrSize
1358 bits = addb(bits, word/8)
1359 mask := uint8(1) << (word % 8)
1360
1361 buf := &getg().m.p.ptr().wbBuf
1362 for i := uintptr(0); i < size; i += goarch.PtrSize {
1363 if mask == 0 {
1364 bits = addb(bits, 1)
1365 if *bits == 0 {
1366
1367 i += 7 * goarch.PtrSize
1368 continue
1369 }
1370 mask = 1
1371 }
1372 if *bits&mask != 0 {
1373 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1374 if src == 0 {
1375 p := buf.get1()
1376 p[0] = *dstx
1377 } else {
1378 srcx := (*uintptr)(unsafe.Pointer(src + i))
1379 p := buf.get2()
1380 p[0] = *dstx
1381 p[1] = *srcx
1382 }
1383 }
1384 mask <<= 1
1385 }
1386 }
1387
1388 // typeBitsBulkBarrier executes a write barrier for every pointer slot
1389 // copied from [src, src+size) to [dst, dst+size), reading the pointer
1390 // bits straight from typ.GCData. typ must describe the memory exactly
1391 // (typ.Size_ == size) and must not use a GC program. It is used when
1392 // the memory being copied is not known to be in the heap, such as
1393 // values moved by reflectcallmove.
1405 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
1406 if typ == nil {
1407 throw("runtime: typeBitsBulkBarrier without type")
1408 }
1409 if typ.Size_ != size {
1410 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
1411 throw("runtime: invalid typeBitsBulkBarrier")
1412 }
1413 if typ.Kind_&abi.KindGCProg != 0 {
1414 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
1415 throw("runtime: invalid typeBitsBulkBarrier")
1416 }
1417 if !writeBarrier.enabled {
1418 return
1419 }
1420 ptrmask := typ.GCData
1421 buf := &getg().m.p.ptr().wbBuf
1422 var bits uint32
1423 for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
1424 if i&(goarch.PtrSize*8-1) == 0 {
1425 bits = uint32(*ptrmask)
1426 ptrmask = addb(ptrmask, 1)
1427 } else {
1428 bits = bits >> 1
1429 }
1430 if bits&1 != 0 {
1431 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1432 srcx := (*uintptr)(unsafe.Pointer(src + i))
1433 p := buf.get2()
1434 p[0] = *dstx
1435 p[1] = *srcx
1436 }
1437 }
1438 }
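
Stripped of the write-barrier plumbing, the loop above is just a walk over a 1-bit-per-word mask packed into bytes. A self-contained version of that walk (the mask and sizes are invented):

package main

import "fmt"

func main() {
	const ptrSize = 8
	ptrmask := []byte{0b0000_0101}   // words 0 and 2 hold pointers
	ptrBytes := uintptr(3 * ptrSize) // type has 3 words' worth of pointer data

	for i := uintptr(0); i < ptrBytes; i += ptrSize {
		word := i / ptrSize
		if ptrmask[word/8]>>(word%8)&1 != 0 {
			fmt.Printf("word %d holds a pointer\n", word)
		}
	}
}
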
1439
1440 // countAlloc returns the number of objects allocated in span s by
1441 // counting the set bits in its gcmarkBits bitmap.
1442 func (s *mspan) countAlloc() int {
1443 count := 0
1444 bytes := divRoundUp(uintptr(s.nelems), 8)
1445
1446
1447
1448
1449 for i := uintptr(0); i < bytes; i += 8 {
1450
1451
1452
1453
1454 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
1455 count += sys.OnesCount64(mrkBits)
1456 }
1457 return count
1458 }
1459
1460 // readUintptr reads the pointer-sized word at p, interpreting its bytes
1461 // in little-endian order regardless of the host byte order.
1462 func readUintptr(p *byte) uintptr {
1463 x := *(*uintptr)(unsafe.Pointer(p))
1464 if goarch.BigEndian {
1465 if goarch.PtrSize == 8 {
1466 return uintptr(sys.Bswap64(uint64(x)))
1467 }
1468 return uintptr(sys.Bswap32(uint32(x)))
1469 }
1470 return x
1471 }
1472
1473 var debugPtrmask struct {
1474 lock mutex
1475 data *byte
1476 }
1477
1478 // progToPointerMask runs the GC program prog to produce the 1-bit
1479 // pointer mask for an object of the given size, returned as a bitvector
1480 // backed by persistently allocated memory.
1481 func progToPointerMask(prog *byte, size uintptr) bitvector {
1482 n := (size/goarch.PtrSize + 7) / 8
1483 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1484 x[len(x)-1] = 0xa1
1485 n = runGCProg(prog, &x[0])
1486 if x[len(x)-1] != 0xa1 {
1487 throw("progToPointerMask: overflow")
1488 }
1489 return bitvector{int32(n), &x[0]}
1490 }
1491
1492 // runGCProg executes the GC program prog, writing the expanded 1-bit
1493 // pointer mask to dst, and returns the number of bits written.
1494 //
1495 // A GC program is a sequence of instructions, each beginning with an
1496 // opcode byte (see also dumpGCProg below):
1497 //
1498 //	00000000: stop
1499 //	0nnnnnnn: emit the next n bits, packed LSB-first in the following ceil(n/8) bytes
1500 //	10000000 n c: repeat the previous n bits c times; n and c are uvarints
1501 //	1nnnnnnn c: repeat the previous n bits c times; c is a uvarint
1507 func runGCProg(prog, dst *byte) uintptr {
1508 dstStart := dst
1509
1510
1511 var bits uintptr
1512 var nbits uintptr
1513
1514 p := prog
1515 Run:
1516 for {
1517
1518
1519 for ; nbits >= 8; nbits -= 8 {
1520 *dst = uint8(bits)
1521 dst = add1(dst)
1522 bits >>= 8
1523 }
1524
1525
1526 inst := uintptr(*p)
1527 p = add1(p)
1528 n := inst & 0x7F
1529 if inst&0x80 == 0 {
1530
1531 if n == 0 {
1532
1533 break Run
1534 }
1535 nbyte := n / 8
1536 for i := uintptr(0); i < nbyte; i++ {
1537 bits |= uintptr(*p) << nbits
1538 p = add1(p)
1539 *dst = uint8(bits)
1540 dst = add1(dst)
1541 bits >>= 8
1542 }
1543 if n %= 8; n > 0 {
1544 bits |= uintptr(*p) << nbits
1545 p = add1(p)
1546 nbits += n
1547 }
1548 continue Run
1549 }
1550
1551
1552 if n == 0 {
1553 for off := uint(0); ; off += 7 {
1554 x := uintptr(*p)
1555 p = add1(p)
1556 n |= (x & 0x7F) << off
1557 if x&0x80 == 0 {
1558 break
1559 }
1560 }
1561 }
1562
1563
1564 c := uintptr(0)
1565 for off := uint(0); ; off += 7 {
1566 x := uintptr(*p)
1567 p = add1(p)
1568 c |= (x & 0x7F) << off
1569 if x&0x80 == 0 {
1570 break
1571 }
1572 }
1573 c *= n
1574
1575
1576
1577
1578
1579
1580
1581
1582 src := dst
1583 const maxBits = goarch.PtrSize*8 - 7
1584 if n <= maxBits {
1585
1586 pattern := bits
1587 npattern := nbits
1588
1589
1590 src = subtract1(src)
1591 for npattern < n {
1592 pattern <<= 8
1593 pattern |= uintptr(*src)
1594 src = subtract1(src)
1595 npattern += 8
1596 }
1597
1598
1599
1600
1601
1602 if npattern > n {
1603 pattern >>= npattern - n
1604 npattern = n
1605 }
1606
1607
1608 if npattern == 1 {
1609
1610
1611
1612
1613
1614
1615 if pattern == 1 {
1616 pattern = 1<<maxBits - 1
1617 npattern = maxBits
1618 } else {
1619 npattern = c
1620 }
1621 } else {
1622 b := pattern
1623 nb := npattern
1624 if nb+nb <= maxBits {
1625
1626 for nb <= goarch.PtrSize*8 {
1627 b |= b << nb
1628 nb += nb
1629 }
1630
1631
1632 nb = maxBits / npattern * npattern
1633 b &= 1<<nb - 1
1634 pattern = b
1635 npattern = nb
1636 }
1637 }
1638
1639
1640
1641
1642 for ; c >= npattern; c -= npattern {
1643 bits |= pattern << nbits
1644 nbits += npattern
1645 for nbits >= 8 {
1646 *dst = uint8(bits)
1647 dst = add1(dst)
1648 bits >>= 8
1649 nbits -= 8
1650 }
1651 }
1652
1653
1654 if c > 0 {
1655 pattern &= 1<<c - 1
1656 bits |= pattern << nbits
1657 nbits += c
1658 }
1659 continue Run
1660 }
1661
1662
1663
1664
1665 off := n - nbits
1666
1667 src = subtractb(src, (off+7)/8)
1668 if frag := off & 7; frag != 0 {
1669 bits |= uintptr(*src) >> (8 - frag) << nbits
1670 src = add1(src)
1671 nbits += frag
1672 c -= frag
1673 }
1674
1675
1676 for i := c / 8; i > 0; i-- {
1677 bits |= uintptr(*src) << nbits
1678 src = add1(src)
1679 *dst = uint8(bits)
1680 dst = add1(dst)
1681 bits >>= 8
1682 }
1683
1684 if c %= 8; c > 0 {
1685 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1686 nbits += c
1687 }
1688 }
1689
1690
1691 totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1692 nbits += -nbits & 7
1693 for ; nbits > 0; nbits -= 8 {
1694 *dst = uint8(bits)
1695 dst = add1(dst)
1696 bits >>= 8
1697 }
1698 return totalBits
1699 }
1700
1701 // materializeGCProg allocates a manually managed span and expands the
1702 // GC program prog into it, producing a 1-bit pointer mask covering
1703 // ptrdata bytes of data. The caller must release the span with
1704 // dematerializeGCProg when done.
1706 func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
1707
1708 bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
1709
1710 pages := divRoundUp(bitmapBytes, pageSize)
1711 s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
1712 runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
1713 return s
1714 }
1715 func dematerializeGCProg(s *mspan) {
1716 mheap_.freeManual(s, spanAllocPtrScalarBits)
1717 }
1718
1719 func dumpGCProg(p *byte) {
1720 nptr := 0
1721 for {
1722 x := *p
1723 p = add1(p)
1724 if x == 0 {
1725 print("\t", nptr, " end\n")
1726 break
1727 }
1728 if x&0x80 == 0 {
1729 print("\t", nptr, " lit ", x, ":")
1730 n := int(x+7) / 8
1731 for i := 0; i < n; i++ {
1732 print(" ", hex(*p))
1733 p = add1(p)
1734 }
1735 print("\n")
1736 nptr += int(x)
1737 } else {
1738 nbit := int(x &^ 0x80)
1739 if nbit == 0 {
1740 for nb := uint(0); ; nb += 7 {
1741 x := *p
1742 p = add1(p)
1743 nbit |= int(x&0x7f) << nb
1744 if x&0x80 == 0 {
1745 break
1746 }
1747 }
1748 }
1749 count := 0
1750 for nb := uint(0); ; nb += 7 {
1751 x := *p
1752 p = add1(p)
1753 count |= int(x&0x7f) << nb
1754 if x&0x80 == 0 {
1755 break
1756 }
1757 }
1758 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1759 nptr += nbit * count
1760 }
1761 }
1762 }
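
To make the encoding concrete, here is a tiny hand-assembled program and its expansion, following the instruction format runGCProg and dumpGCProg interpret (the program itself is made up): 0x02 0b01 emits two literal bits 1,0; 0x81 0x03 repeats the previous 1 bit three more times; 0x00 ends the program.

package main

import "fmt"

func main() {
	// prog is only shown for reference; the expansion below is done by hand.
	prog := []byte{0x02, 0b01, 0x81, 0x03, 0x00}
	_ = prog

	bits := []byte{1, 0}      // after the 2-bit literal
	last := bits[len(bits)-1] // the 1-bit pattern named by the repeat
	for i := 0; i < 3; i++ {  // repeat count 3
		bits = append(bits, last)
	}
	fmt.Println("expanded pointer bits:", bits) // [1 0 0 0 0]
}
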
1763
1764 // reflect_gcbits returns the GC pointer bitmap of the value that x
1765 // points to, one byte per word. It is called from package reflect via
1766 // linkname.
1770 func reflect_gcbits(x any) []byte {
1771 return getgcmask(x)
1772 }
1773
1774 // getgcmask returns the GC pointer mask for the value ep points to, one
1775 // byte per word, 1 where the word may hold a pointer. It consults the
1776 // data/bss bitmaps for globals, heap metadata for heap objects, and stack maps for stack variables.
1777 func getgcmask(ep any) (mask []byte) {
1778 e := *efaceOf(&ep)
1779 p := e.data
1780 t := e._type
1781
1782 var et *_type
1783 if t.Kind_&abi.KindMask != abi.Pointer {
1784 throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
1785 }
1786 et = (*ptrtype)(unsafe.Pointer(t)).Elem
1787
1788
1789 for _, datap := range activeModules() {
1790
1791 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1792 bitmap := datap.gcdatamask.bytedata
1793 n := et.Size_
1794 mask = make([]byte, n/goarch.PtrSize)
1795 for i := uintptr(0); i < n; i += goarch.PtrSize {
1796 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1797 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1798 }
1799 return
1800 }
1801
1802
1803 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1804 bitmap := datap.gcbssmask.bytedata
1805 n := et.Size_
1806 mask = make([]byte, n/goarch.PtrSize)
1807 for i := uintptr(0); i < n; i += goarch.PtrSize {
1808 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1809 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1810 }
1811 return
1812 }
1813 }
1814
1815
1816 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1817 if s.spanclass.noscan() {
1818 return nil
1819 }
1820 limit := base + s.elemsize
1821
1822
1823
1824
1825 tp := s.typePointersOfUnchecked(base)
1826 base = tp.addr
1827
1828
1829 maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
1830 for {
1831 var addr uintptr
1832 if tp, addr = tp.next(limit); addr == 0 {
1833 break
1834 }
1835 maskFromHeap[(addr-base)/goarch.PtrSize] = 1
1836 }
1837
1838
1839
1840
1841 for i := limit; i < s.elemsize; i++ {
1842 if *(*byte)(unsafe.Pointer(i)) != 0 {
1843 throw("found non-zeroed tail of allocation")
1844 }
1845 }
1846
1847
1848
1849 for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
1850 maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
1851 }
1852
1853 if et.Kind_&abi.KindGCProg == 0 {
1854
1855 maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
1856 tp = s.typePointersOfType(et, base)
1857 for {
1858 var addr uintptr
1859 if tp, addr = tp.next(limit); addr == 0 {
1860 break
1861 }
1862 maskFromType[(addr-base)/goarch.PtrSize] = 1
1863 }
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875 differs := false
1876 for i := range maskFromHeap {
1877 if maskFromHeap[i] != maskFromType[i] {
1878 differs = true
1879 break
1880 }
1881 }
1882
1883 if differs {
1884 print("runtime: heap mask=")
1885 for _, b := range maskFromHeap {
1886 print(b)
1887 }
1888 println()
1889 print("runtime: type mask=")
1890 for _, b := range maskFromType {
1891 print(b)
1892 }
1893 println()
1894 print("runtime: type=", toRType(et).string(), "\n")
1895 throw("found two different masks from two different methods")
1896 }
1897 }
1898
1899
1900 mask = maskFromHeap
1901
1902
1903
1904
1905 KeepAlive(ep)
1906 return
1907 }
1908
1909
1910 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1911 found := false
1912 var u unwinder
1913 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1914 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1915 found = true
1916 break
1917 }
1918 }
1919 if found {
1920 locals, _, _ := u.frame.getStackMap(false)
1921 if locals.n == 0 {
1922 return
1923 }
1924 size := uintptr(locals.n) * goarch.PtrSize
1925 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1926 mask = make([]byte, n/goarch.PtrSize)
1927 for i := uintptr(0); i < n; i += goarch.PtrSize {
1928 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1929 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1930 }
1931 }
1932 return
1933 }
1934
1935
1936
1937
1938 return
1939 }
1940