Source file
src/runtime/mbitmap.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

// heapBitsInSpan returns true if the size class for the given object size
// stores its pointer/scalar data directly at the end of the span, rather
// than behind a malloc header.
func heapBitsInSpan(userSize uintptr) bool {
	// N.B. gc.MinSizeForMallocHeader is an exclusive minimum so that this
	// function is invariant under size-class rounding on its input.
	return userSize <= gc.MinSizeForMallocHeader
}
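
// Illustrative example (not part of the original source): on 64-bit
// platforms, gc.MinSizeForMallocHeader works out to one bitmap word's
// coverage, goarch.PtrSize*ptrBits = 512 bytes (an assumption of this
// sketch). Under that assumption:
//
//	heapBitsInSpan(32)   // true: bits live at the end of the span
//	heapBitsInSpan(512)  // true: boundary case, one full bitmap word
//	heapBitsInSpan(1024) // false: object carries a malloc header instead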

// typePointers is an iterator over the pointers in a heap object.
//
// Iteration tiles the type's pointer mask across the allocation.
type typePointers struct {
	// elem is the address of the current array element of type typ being
	// iterated over. Objects that are not arrays are treated as
	// single-element arrays, in which case this value is the address of
	// the object.
	elem uintptr

	// addr is the address the iterator is currently working from and
	// describes the address of the first word referenced by mask.
	addr uintptr

	// mask is a bitmask where each bit corresponds to pointer-words after
	// addr. Bit 0 is the pointer-word at addr, bit 1 is the next word,
	// and so on. If a bit is 1, then there is a pointer at that word.
	// nextFast and next mask out bits in this mask as their pointers are
	// processed.
	mask uintptr

	// typ is a pointer to the type information for the heap object's
	// targets. It may be nil if the object is small enough that its heap
	// bits are stored directly in the span.
	typ *_type
}
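
// Illustrative example (not part of the original source): suppose typ
// describes a 24-byte struct { p *int; x int; q *int } on a 64-bit
// platform, laid out as pointer, scalar, pointer. For an object at
// address 0x1000, the iterator starts out as
//
//	typePointers{elem: 0x1000, addr: 0x1000, mask: 0b101, typ: typ}
//
// Mask bit 0 set means a pointer-word at 0x1000 (p); bit 2 set means a
// pointer-word at 0x1010 (q). Each call to next/nextFast clears the
// lowest set bit and returns the corresponding address.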

// typePointersOf returns an iterator over all heap pointers in the range
// [addr, addr+size).
//
// addr and addr+size must be in the range [span.base(), span.limit).
//
// Note: addr+size must be passed as the limit argument to the iterator's
// next method on each iteration.
func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
	base := span.objBase(addr)
	tp := span.typePointersOfUnchecked(base)
	if base == addr && size == span.elemsize {
		return tp
	}
	return tp.fastForward(addr-tp.addr, addr+size)
}

// typePointersOfUnchecked is like typePointersOf, but assumes addr is the
// base of an allocation slot in a span (the start of the object if no
// header, the header otherwise). It returns an iterator that generates all
// pointers in the range [addr, addr+span.elemsize).
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
	const doubleCheck = false
	if doubleCheck && span.objBase(addr) != addr {
		print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
		throw("typePointersOfUnchecked consisting of non-base-address for object")
	}

	spc := span.spanclass
	if spc.noscan() {
		return typePointers{}
	}
	if heapBitsInSpan(span.elemsize) {
		// Handle header-less objects.
		return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
	}

	// All of these objects have a header.
	var typ *_type
	if spc.sizeclass() != 0 {
		// Pull the allocation header from the first word of the object.
		typ = *(**_type)(unsafe.Pointer(addr))
		addr += gc.MallocHeaderSize
	} else {
		// The type is stored in the span for large objects.
		typ = (*_type)(atomic.Loadp(unsafe.Pointer(&span.largeType)))
		if typ == nil {
			// Allow a nil type here for delayed zeroing. See mallocgc.
			return typePointers{}
		}
	}
	gcmask := getGCMask(typ)
	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
}

// typePointersOfType is like typePointersOf, but assumes addr points to one
// or more contiguous instances of the provided type. The provided type must
// not be nil.
//
// It returns an iterator that tiles typ's gcmask starting from addr. It's
// the caller's responsibility to limit iteration.
func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
	const doubleCheck = false
	if doubleCheck && typ == nil {
		throw("bad type passed to typePointersOfType")
	}
	if span.spanclass.noscan() {
		return typePointers{}
	}
	// Since we have the type, pretend we have a header.
	gcmask := getGCMask(typ)
	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
}

// nextFast is the fast path of next. nextFast is written to be inlineable
// and, as the name implies, fast.
//
// Callers that are performance-critical should iterate using the following
// pattern:
//
//	for {
//		var addr uintptr
//		if tp, addr = tp.nextFast(); addr == 0 {
//			if tp, addr = tp.next(limit); addr == 0 {
//				break
//			}
//		}
//		// Use addr.
//	}
func (tp typePointers) nextFast() (typePointers, uintptr) {
	// TESTQ/JEQ
	if tp.mask == 0 {
		return tp, 0
	}
	// BSFQ
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(tp.mask))
	} else {
		i = sys.TrailingZeros32(uint32(tp.mask))
	}
	if GOARCH == "amd64" {
		// BTCQ
		tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
	} else {
		// SUB, AND
		tp.mask &= tp.mask - 1
	}
	// LEAQ (XX)(XX*8)
	return tp, tp.addr + uintptr(i)*goarch.PtrSize
}

// next advances the iterator, returning the updated iterator and the address
// of the next pointer.
//
// limit must be the same each time it is passed to next.
func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
	for {
		if tp.mask != 0 {
			return tp.nextFast()
		}

		// Stop if we don't actually have type information.
		if tp.typ == nil {
			return typePointers{}, 0
		}

		// Advance to the next element if necessary.
		if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
			tp.elem += tp.typ.Size_
			tp.addr = tp.elem
		} else {
			tp.addr += ptrBits * goarch.PtrSize
		}

		// Check if we've exceeded the limit with the last update.
		if tp.addr >= limit {
			return typePointers{}, 0
		}

		// Grab more bits and try again.
		tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
		if tp.addr+goarch.PtrSize*ptrBits > limit {
			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
		}
	}
}
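
// A typical full-object iteration using next alone looks like the
// following sketch (the same shape bulkBarrierPreWrite uses below):
//
//	tp := span.typePointersOfUnchecked(base)
//	limit := base + span.elemsize
//	for {
//		var addr uintptr
//		if tp, addr = tp.next(limit); addr == 0 {
//			break
//		}
//		// addr is the address of a pointer slot in the object.
//	}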

// fastForward moves the iterator forward by n bytes. n must be a multiple
// of goarch.PtrSize. limit must be the same limit passed to next for this
// iterator.
func (tp typePointers) fastForward(n, limit uintptr) typePointers {
	// Basic bounds check.
	target := tp.addr + n
	if target >= limit {
		return typePointers{}
	}
	if tp.typ == nil {
		// Handle small objects.
		// Clear any bits before the target address.
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
		// Clear any bits past the limit.
		if tp.addr+goarch.PtrSize*ptrBits > limit {
			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
		}
		return tp
	}

	// Move up elem and addr.
	// Offsets within an element are always at a ptrBits*goarch.PtrSize boundary.
	if n >= tp.typ.Size_ {
		// elem needs to be moved to the element containing
		// tp.addr + n.
		oldelem := tp.elem
		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
	} else {
		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
	}

	if tp.addr-tp.elem >= tp.typ.PtrBytes {
		// We're starting in the non-pointer area of an array.
		// Move up to the next element.
		tp.elem += tp.typ.Size_
		tp.addr = tp.elem
		tp.mask = readUintptr(getGCMask(tp.typ))

		// We may have exceeded the limit after this. Bail just like next does.
		if tp.addr >= limit {
			return typePointers{}
		}
	} else {
		// Grab the mask, but then clear any bits before the target
		// address.
		tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
	}
	if tp.addr+goarch.PtrSize*ptrBits > limit {
		bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
		tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
	}
	return tp
}
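
// Illustrative example (not part of the original source): to visit only
// the pointers of the second element of an array of 48-byte elements
// starting at base, skip ahead instead of iterating from the start:
//
//	tp := span.typePointersOfUnchecked(base)
//	tp = tp.fastForward(48, base+96) // advance 48 bytes; limit is base+96
//
// This is exactly how typePointersOf handles interior ranges above.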

// objBase returns the base pointer for the object containing addr in span.
//
// Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
func (span *mspan) objBase(addr uintptr) uintptr {
	return span.base() + span.objIndex(addr)*span.elemsize
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before calling
// memmove(dst, src, size), so that the GC cannot observe the memmove
// and the barriers as separate actions.
//
// The typ argument is the type of the space at src and dst (and the
// element type if src and dst refer to arrays). It is optional: if typ
// is nil, the type information is read back from the heap, so providing
// it is purely an optimization.
func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	s := spanOf(dst)
	if s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no new pointers are
		// being introduced where there weren't any before,
		// so there's no need for a barrier.
		return
	}
	buf := &getg().m.p.ptr().wbBuf

	// Double check that the barriers we're about to emit are consistent
	// with the type information, if any was provided.
	const doubleCheck = false
	if doubleCheck {
		doubleCheckTypePointersOfType(s, typ, dst, size)
	}

	var tp typePointers
	if typ != nil {
		tp = s.typePointersOfType(typ, dst)
	} else {
		tp = s.typePointersOf(dst, size)
	}
	if src == 0 {
		for {
			var addr uintptr
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		for {
			var addr uintptr
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
// The type of the space can be provided purely as an optimization;
// see bulkBarrierPreWrite's comment for more details.
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	s := spanOf(dst)

	// Double check that the barriers we're about to emit are consistent
	// with the type information, if any was provided.
	const doubleCheck = false
	if doubleCheck {
		doubleCheckTypePointersOfType(s, typ, dst, size)
	}

	var tp typePointers
	if typ != nil {
		tp = s.typePointersOfType(typ, dst)
	} else {
		tp = s.typePointersOf(dst, size)
	}
	for {
		var addr uintptr
		if tp, addr = tp.next(dst + size); addr == 0 {
			break
		}
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}

// initHeapBits initializes the heap bitmap for a span.
func (s *mspan) initHeapBits() {
	if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
		// This span holds scannable pointer-word-sized objects, so every
		// word must be a pointer. Set all the heap bits up front.
		b := s.heapBits()
		for i := range b {
			b[i] = ^uintptr(0)
		}
	} else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
		b := s.heapBits()
		clear(b)
	}
	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(s.elemsize) {
		s.initInlineMarkBits()
	}
}

// heapBits returns the heap ptr/scalar bits stored at the end of the span for
// small object spans and heap arena spans.
//
// Note that the uintptr of each element means something different for small object
// spans and for heap arena spans. Small object spans are easy: they're never interpreted
// as anything but uintptr, so they're immune to differences in endianness. However, the
// heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
// order of the data matters.
func (span *mspan) heapBits() []uintptr {
	const doubleCheck = false

	if doubleCheck && !span.isUserArenaChunk {
		if span.spanclass.noscan() {
			throw("heapBits called for noscan")
		}
		if span.elemsize > gc.MinSizeForMallocHeader {
			throw("heapBits called for span class that should have a malloc header")
		}
	}
	// Find the bitmap at the end of the span.
	//
	// Nearly every span with heap bits is exactly one page in size. Arenas are the only exception.
	if span.npages == 1 {
		// This will be inlined and constant-folded down.
		return heapBitsSlice(span.base(), pageSize, span.elemsize)
	}
	return heapBitsSlice(span.base(), span.npages*pageSize, span.elemsize)
}

// heapBitsSlice returns the heap ptr/scalar bits for the given span as a slice.
func heapBitsSlice(spanBase, spanSize, elemsize uintptr) []uintptr {
	base, bitmapSize := spanHeapBitsRange(spanBase, spanSize, elemsize)
	elems := int(bitmapSize / goarch.PtrSize)
	var sl notInHeapSlice
	sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(base)), elems, elems}
	return *(*[]uintptr)(unsafe.Pointer(&sl))
}

// spanHeapBitsRange returns the base address and size in bytes of the heap
// bitmap stored at the end of a span of the given size.
func spanHeapBitsRange(spanBase, spanSize, elemsize uintptr) (base, size uintptr) {
	size = spanSize / goarch.PtrSize / 8
	base = spanBase + spanSize - size
	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(elemsize) {
		base -= unsafe.Sizeof(spanInlineMarkBits{})
	}
	return
}
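
// Worked example (not part of the original source): for a one-page span
// (spanSize = pageSize = 8 KiB) on a 64-bit platform, the bitmap needs one
// bit per word, so size = 8192/8/8 = 128 bytes and the bits occupy the
// final 128 bytes of the page, [spanBase+8064, spanBase+8192), shifted
// down by the inline mark bits when Green Tea is enabled.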

// heapBitsSmallForAddr loads heap bits for the object stored at addr from
// span.heapBits.
//
// addr must be the base pointer of an object in the span.
// heapBitsInSpan(span.elemsize) must be true.
func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
	hbitsBase, _ := spanHeapBitsRange(span.base(), span.npages*pageSize, span.elemsize)
	hbits := (*byte)(unsafe.Pointer(hbitsBase))

	// These objects are always small enough that their bitmaps
	// fit in a single word, so just load the word or two we need.
	//
	// Mirrors mspan.writeHeapBitsSmall.
	i := (addr - span.base()) / goarch.PtrSize / ptrBits
	j := (addr - span.base()) / goarch.PtrSize % ptrBits
	bits := span.elemsize / goarch.PtrSize
	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))

	var read uintptr
	if j+bits > ptrBits {
		// Two reads.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		read = *word0 >> j
		read |= (*word1 & ((1 << bits1) - 1)) << bits0
	} else {
		// One read.
		read = (*word0 >> j) & ((1 << bits) - 1)
	}
	return read
}
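
// Worked example (not part of the original source): with 48-byte objects
// (bits = 48/8 = 6) on a 64-bit platform, the object at word offset 60
// has i = 60/64 = 0 and j = 60%64 = 60. Since j+bits = 66 > ptrBits = 64,
// its bitmap straddles two words: 4 bits come from the top of word 0 and
// 2 bits from the bottom of word 1, which is the two-read path above.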

// writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar
// data is stored at the end of the span, and is immediately followed by the
// data for the next object in the span.
//
// Assumes dataSize is <= ptrBits*goarch.PtrSize.
func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
	// The objects here are always really small, so a single load is sufficient.
	src0 := readUintptr(getGCMask(typ))

	// Create repetitions of the bitmap if we have a small slice backing store.
	src := src0
	if typ.Size_ == goarch.PtrSize {
		// The type is exactly one pointer word, so the bitmap is all 1s
		// across the data.
		src = (1 << (dataSize / goarch.PtrSize)) - 1
		scanSize = dataSize
	} else {
		// N.B. If dataSize is not a multiple of typ.Size_, this replication
		// is only valid with asan enabled, where the tail is a redzone.
		if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
			throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
		}
		scanSize = typ.PtrBytes
		for i := typ.Size_; i < dataSize; i += typ.Size_ {
			src |= src0 << (i / goarch.PtrSize)
			scanSize += typ.Size_
		}
		if asanenabled {
			// Mask out bits beyond dataSize so the redzone at the end of
			// the allocation reads as scalar data.
			src &= (1 << (dataSize / goarch.PtrSize)) - 1
		}
	}

	// Since we're never writing more than one uintptr's worth of bits, we're
	// either going to do one or two writes.
	dstBase, _ := spanHeapBitsRange(span.base(), pageSize, span.elemsize)
	dst := unsafe.Pointer(dstBase)
	o := (x - span.base()) / goarch.PtrSize
	i := o / ptrBits
	j := o % ptrBits
	bits := span.elemsize / goarch.PtrSize
	if j+bits > ptrBits {
		// Two writes.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
		dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
		*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
		*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
	} else {
		// One write.
		dst := (*uintptr)(add(dst, i*goarch.PtrSize))
		*dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j)
	}

	const doubleCheck = false
	if doubleCheck {
		srcRead := span.heapBitsSmallForAddr(x)
		if srcRead != src {
			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
			throw("bad pointer bits written for small object")
		}
	}
	return
}
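
// Worked example (not part of the original source): take a hypothetical
// typ = struct { p *byte; x uintptr } (Size_ = 16, PtrBytes = 8,
// gcmask = 0b01) backing a 3-element slice, so dataSize = 48. The
// replication loop ORs in shifted copies of src0:
//
//	src = 0b01 | 0b01<<2 | 0b01<<4 = 0b010101
//
// and scanSize = 8 + 16 + 16 = 40, so scanning can stop after the last
// element's pointer word.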

// The heapSetType* functions record that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ. (The number
// of values is given by dataSize / typ.Size.) If dataSize < size, the
// fragment [x+dataSize, x+size) is recorded as non-pointer data.
// It is known that the type has pointers somewhere; malloc does not call
// these functions when there are no pointers.
//
// There can be read-write races between these functions and things that
// read the heap metadata like scanobject. However, since heapSetType* is
// only used for objects that have not yet been made reachable, readers
// will ignore bits being modified by these functions. This does mean
// these functions cannot transiently modify shared memory that belongs
// to neighboring objects.
const doubleCheckHeapSetType = doubleCheckMalloc

func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
	if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
		throw("tried to write heap bits, but no heap bits in span")
	}
	scanSize := span.writeHeapBitsSmall(x, dataSize, typ)
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, nil, span)
	}
	return scanSize
}

func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr {
	if header == nil {
		// The caller is expected to pass a pointer to the object's
		// malloc header slot; a nil pointer here indicates a bug in
		// the allocation path.
		throw("runtime: pointer to heap type header nil?")
	}
	*header = typ
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, header, span)
	}
	return span.elemsize
}

func heapSetTypeLarge(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
	gctyp := typ
	// Write out the header atomically to synchronize with the garbage
	// collector. A concurrent reader that observes a nil largeType treats
	// the object as having no pointers yet; see typePointersOfUnchecked.
	atomic.StorepNoWB(unsafe.Pointer(&span.largeType), unsafe.Pointer(gctyp))
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, &span.largeType, span)
	}
	return span.elemsize
}

func doubleCheckHeapType(x, dataSize uintptr, gctyp *_type, header **_type, span *mspan) {
	doubleCheckHeapPointers(x, dataSize, gctyp, header, span)

	// To exercise the less common paths, also check a random interior
	// pointer and size; this covers fastForward and the mask-trimming
	// paths in next.
	maxIterBytes := span.elemsize
	if header == nil {
		maxIterBytes = dataSize
	}
	off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
	size := dataSize - off
	if size == 0 {
		off -= goarch.PtrSize
		size += goarch.PtrSize
	}
	interior := x + off
	size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
	if size == 0 {
		size = goarch.PtrSize
	}
	// Round size up to a multiple of the type's size.
	size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
	if interior+size > x+maxIterBytes {
		size = x + maxIterBytes - interior
	}
	doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
}

func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
	// Check that scanning the full object works.
	tp := span.typePointersOfUnchecked(span.objBase(x))
	maxIterBytes := span.elemsize
	if header == nil {
		maxIterBytes = dataSize
	}
	bad := false
	for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			var addr uintptr
			tp, addr = tp.next(x + span.elemsize)
			if addr == 0 {
				println("runtime: found bad iterator")
			}
			if addr != x+i {
				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
				bad = true
			}
		}
	}
	if !bad {
		var addr uintptr
		tp, addr = tp.next(x + span.elemsize)
		if addr == 0 {
			return
		}
		println("runtime: extra pointer:", hex(addr))
	}
	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " TFlagGCMaskOnDemand=", typ.TFlag&abi.TFlagGCMaskOnDemand != 0, "\n")
	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
	print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
	print("runtime: limit=", hex(x+span.elemsize), "\n")
	tp = span.typePointersOfUnchecked(x)
	dumpTypePointers(tp)
	for {
		var addr uintptr
		if tp, addr = tp.next(x + span.elemsize); addr == 0 {
			println("runtime: would've stopped here")
			dumpTypePointers(tp)
			break
		}
		print("runtime: addr=", hex(addr), "\n")
		dumpTypePointers(tp)
	}
	throw("heapSetType: pointer entry not correct")
}

func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
	bad := false
	if interior < x {
		print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
		throw("found bad interior pointer")
	}
	off := interior - x
	tp := span.typePointersOf(interior, size)
	for i := off; i < off+size; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			var addr uintptr
			tp, addr = tp.next(interior + size)
			if addr == 0 {
				println("runtime: found bad iterator")
				bad = true
			}
			if addr != x+i {
				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
				bad = true
			}
		}
	}
	if !bad {
		var addr uintptr
		tp, addr = tp.next(interior + size)
		if addr == 0 {
			return
		}
		println("runtime: extra pointer:", hex(addr))
	}
	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
	print("runtime: limit=", hex(interior+size), "\n")
	tp = span.typePointersOf(interior, size)
	dumpTypePointers(tp)
	for {
		var addr uintptr
		if tp, addr = tp.next(interior + size); addr == 0 {
			println("runtime: would've stopped here")
			dumpTypePointers(tp)
			break
		}
		print("runtime: addr=", hex(addr), "\n")
		dumpTypePointers(tp)
	}

	print("runtime: want: ")
	for i := off; i < off+size; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < dataSize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			print("1")
		} else {
			print("0")
		}
	}
	println()

	throw("heapSetType: pointer entry not correct")
}

// doubleCheckTypePointersOfType checks that typePointersOfType and
// typePointersOf agree on where the pointers are for the given range.
func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
	if typ == nil {
		return
	}
	if typ.Kind() == abi.Interface {
		// Interfaces are unfortunately inconsistently handled
		// when it comes to the type pointer, so it's impossible
		// to check typePointersOfType.
		return
	}
	tp0 := s.typePointersOfType(typ, addr)
	tp1 := s.typePointersOf(addr, size)
	failed := false
	for {
		var addr0, addr1 uintptr
		tp0, addr0 = tp0.next(addr + size)
		tp1, addr1 = tp1.next(addr + size)
		if addr0 != addr1 {
			failed = true
			break
		}
		if addr0 == 0 {
			break
		}
	}
	if failed {
		tp0 := s.typePointersOfType(typ, addr)
		tp1 := s.typePointersOf(addr, size)
		print("runtime: addr=", hex(addr), " size=", size, "\n")
		print("runtime: type=", toRType(typ).string(), "\n")
		dumpTypePointers(tp0)
		dumpTypePointers(tp1)
		for {
			var addr0, addr1 uintptr
			tp0, addr0 = tp0.next(addr + size)
			tp1, addr1 = tp1.next(addr + size)
			print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
			if addr0 == 0 && addr1 == 0 {
				break
			}
		}
		throw("mismatch between typePointersOfType and typePointersOf")
	}
}

// dumpTypePointers prints the state of the typePointers iterator tp for
// debugging.
func dumpTypePointers(tp typePointers) {
	print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
	print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
	for i := uintptr(0); i < ptrBits; i++ {
		if tp.mask&(uintptr(1)<<i) != 0 {
			print("1")
		} else {
			print("0")
		}
	}
	println()
}

// addb returns the byte pointer p+n.
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.byte&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

// allocBitsForIndex returns a markBits representing the allocation bit
// for allocBitIndex in the span.
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uint16) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
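
// Illustrative example (not part of the original source): if the 8 bytes
// of allocBits starting at whichByte are 05 00 00 00 00 00 00 00 (objects
// 0 and 2 allocated), the assembled word is 0x5 and allocCache becomes
// ^0x5 = 0x...fffa, so sys.TrailingZeros64(allocCache) = 1 immediately
// yields the first free object index, 1.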

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uint16 {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Nothing available in cached bits; move index to the start of
		// the next cached bits and grab the next 8 bytes.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the bytes at whichByte.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
	}
	result := sfreeindex + uint16(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}

// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < uintptr(s.freeindex) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// isFreeOrNewlyAllocated reports whether the index'th object in s is
// either unallocated, or was newly allocated (allocated after
// s.freeIndexForScan was last updated) and is therefore implicitly live.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFreeOrNewlyAllocated(index uintptr) bool {
	if index < uintptr(s.freeIndexForScan) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}
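
// Worked example (not part of the original source): for elemsize = 48,
// the magic multiplier must satisfy (n*divMul)>>32 == n/48 for all valid
// n. Assuming divMul = ceil(2^32/48) = 89478486 (the constraint the
// generated table satisfies), n = 96 gives (96*89478486)>>32 = 2,
// matching 96/48.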

// objIndex returns the index of the object containing the pointer p
// within span s.
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}

// markBitsForAddr returns the markBits for the object containing p.
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// isMarkedOrNotInHeap reports whether the object at p is marked, or true
// if p does not point into the heap.
func isMarkedOrNotInHeap(p unsafe.Pointer) bool {
	obj, span, objIndex := findObject(uintptr(p), 0, 0)
	if obj != 0 {
		mbits := span.markBitsForIndex(objIndex)
		return mbits.isMarked()
	}

	// The pointer is not in the heap: it may point to a stack, a global,
	// or a runtime-internal structure, none of which are tracked by the
	// heap's mark bits, so treat the referent as reachable.
	return true
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when -clobberdead flag is set.
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))

// badPointer throws a bad-pointer-in-heap panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p points to an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platforms where the compiler's clobberdead
			// mode is implemented. On these platforms clobberdeadPtr cannot
			// be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}

// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap
// pointer into an unsafe.Pointer is ok.
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not
	// call badPointer. Since we're already promised that p doesn't point
	// into the heap, just disallow heap pointers and the special
	// clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}

const ptrBits = 8 * goarch.PtrSize

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				p := buf.get1()
				p[0] = *dstx
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				p := buf.get2()
				p[0] = *dstx
				p[1] = *srcx
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and
// [dst, dst+size). dst, src, and size must be pointer-aligned.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.enabled {
		return
	}
	ptrmask := getGCMask(typ)
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}

// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(uintptr(s.nelems), 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases, irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get a OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}

// readUintptr reads a uintptr's worth of bitmap bytes starting at the
// aligned pointer p, correcting for endianness so that bit 0 of the
// result always corresponds to the first byte's low bit.
func readUintptr(p *byte) uintptr {
	x := *(*uintptr)(unsafe.Pointer(p))
	if goarch.BigEndian {
		if goarch.PtrSize == 8 {
			return uintptr(sys.Bswap64(uint64(x)))
		}
		return uintptr(sys.Bswap32(uint32(x)))
	}
	return x
}
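
// Illustrative example (not part of the original source): the bitmap is a
// little-endian byte stream, so the byte sequence 01 00 00 00 00 00 00 80
// loads on a big-endian 64-bit machine as 0x0100000000000080, and Bswap64
// restores the intended value 0x8000000000000001 (bit 0, the first word,
// is a pointer).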

var debugPtrmask struct {
	lock mutex
	data *byte
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program
// prog. size is the size of the region described by prog, in bytes.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, &x[0])
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint

// runGCProg runs the GC program prog, writing the resulting 1-bit pointer
// mask to dst, and returns the number of bits written.
func runGCProg(prog, dst *byte) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over.
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory and writing to
		// memory. Handling fewer than 8 bits here makes the general
		// loop simpler. The cutoff is goarch.PtrSize*8 - 7 to guarantee
		// that when we add the pattern to a bit buffer holding at most
		// 7 bits (a partial byte) it will not overflow.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			src = subtract1(src)
			for npattern < n {
				pattern <<= 8
				pattern |= uintptr(*src)
				src = subtract1(src)
				npattern += 8
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is all 0s,
				// so we can claim that the stored pattern is any number of 0s.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				for nbits >= 8 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
					nbits -= 8
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		// Leading src fragment.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
			src = add1(src)
			nbits += frag
			c -= frag
		}
		// Main loop: load one byte, write another.
		// The bits are rotating through the bit buffer.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
			src = add1(src)
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
		// Final src fragment.
		if c %= 8; c > 0 {
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
			nbits += c
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	nbits += -nbits & 7
	for ; nbits > 0; nbits -= 8 {
		*dst = uint8(bits)
		dst = add1(dst)
		bits >>= 8
	}
	return totalBits
}
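
// Worked example (not part of the original source) of the bytecode:
// the program
//
//	02 01 82 03 00
//
// means "emit 2 literal bits from the next byte (01: pointer, then
// scalar)", then "repeat the previous 2 bits 3 more times" (0x82 encodes
// n=2 with count varint c=3), then stop. runGCProg writes the single
// byte 0x55, i.e. the 8-bit mask 1,0,1,0,1,0,1,0 in emission order:
// pointers at words 0, 2, 4 and 6.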

// dumpGCProg disassembles the GC program at p to standard output for
// debugging.
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// reflect_gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
func reflect_gcbits(x any) []byte {
	return pointerMask(x)
}

// pointerMask returns the GC type info for the pointer stored in ep, for
// testing. If ep points to the stack, only static live information is
// returned (i.e. not for objects which are only dynamically live stack
// objects).
func pointerMask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type

	var et *_type
	if t.Kind() != abi.Pointer {
		throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
	}
	et = (*ptrtype)(unsafe.Pointer(t)).Elem

	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := et.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := et.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			return nil
		}
		limit := base + s.elemsize

		// Move the base up to the iterator's start, because
		// we want to hide evidence of a malloc header from the
		// caller.
		tp := s.typePointersOfUnchecked(base)
		base = tp.addr

		// Unroll the full bitmap the GC would actually observe.
		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
		for {
			var addr uintptr
			if tp, addr = tp.next(limit); addr == 0 {
				break
			}
			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
		}

		// Double-check that every part of the ptr/scalar we're not
		// showing the caller is zeroed. This keeps us honest that
		// that information is actually irrelevant.
		for i := limit; i < s.elemsize; i++ {
			if *(*byte)(unsafe.Pointer(i)) != 0 {
				throw("found non-zeroed tail of allocation")
			}
		}

		// Callers (and a check we're about to run) expect this mask
		// to end at the last pointer.
		for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
			maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
		}

		// Unroll again, but this time from the type information.
		maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
		tp = s.typePointersOfType(et, base)
		for {
			var addr uintptr
			if tp, addr = tp.next(limit); addr == 0 {
				break
			}
			maskFromType[(addr-base)/goarch.PtrSize] = 1
		}

		// Validate that the prefix of maskFromType is equal to
		// maskFromHeap. maskFromType may contain more pointers than
		// maskFromHeap produces because maskFromHeap may be able to
		// get exact type information for certain classes of objects.
		// With maskFromType, we're always just tiling the type bitmap
		// through to the elemsize.
		//
		// It's OK if maskFromType has pointers in elemsize that extend
		// past the actual populated space; we checked above that all
		// that space is zeroed, so the GC will just see nil pointers.
		differs := false
		for i := range maskFromHeap {
			if maskFromHeap[i] != maskFromType[i] {
				differs = true
				break
			}
		}

		if differs {
			print("runtime: heap mask=")
			for _, b := range maskFromHeap {
				print(b)
			}
			println()
			print("runtime: type mask=")
			for _, b := range maskFromType {
				print(b)
			}
			println()
			print("runtime: type=", toRType(et).string(), "\n")
			throw("found two different masks from two different methods")
		}

		// Select the heap mask to return. We may not have a type mask.
		mask = maskFromHeap

		// Make sure we keep ep alive. We may have stopped referencing
		// ep's data pointer sometime before this point and it's possible
		// for that memory to get freed.
		KeepAlive(ep)
		return
	}

	// stack
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// Otherwise, not something the GC knows about.
	// Possibly read-only data, like malloc'd data, say.
	// Must not have pointers.
	return
}