Source file
src/runtime/mbitmap.go
// Garbage collector: type and heap bitmaps.
//
// Heap pointer/scalar metadata ("heap bits") records which words of an
// allocated object hold pointers. For objects small enough to satisfy
// heapBitsInSpan, the bits are packed into the tail of the span itself;
// larger objects record their type in a malloc header (or on the span,
// for large objects) and pointers are located through the type's GC mask.
// The typePointers iterator below abstracts over both representations.
56 package runtime
57
58 import (
59 "internal/abi"
60 "internal/goarch"
61 "internal/goexperiment"
62 "internal/runtime/atomic"
63 "internal/runtime/gc"
64 "internal/runtime/sys"
65 "unsafe"
66 )
67
// heapBitsInSpan reports whether an object of userSize bytes is small
// enough for its heap bits (pointer/scalar bitmap) to be stored in the
// span itself rather than behind a malloc header.
76 func heapBitsInSpan(userSize uintptr) bool {
77
78
79 return userSize <= gc.MinSizeForMallocHeader
80 }
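// For example, with 8-byte pointer words a single bitmap word covers 64
// object words; assuming gc.MinSizeForMallocHeader is
// goarch.PtrSize*ptrBits, objects of up to 512 bytes keep their pointer
// bitmap in the span and anything larger carries a malloc header.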

// typePointers is an iterator over the pointers in a heap object.
//
// It works with both in-span heap bits and type-based GC masks; next,
// nextFast, and fastForward advance it.
86 type typePointers struct {
// elem is the address of the current array element of type typ being
// iterated over. Objects that are not arrays are treated as
// single-element arrays, in which case this value does not change.
90 elem uintptr
// addr is the address the iterator is currently working from and
// describes the address of the first word referenced by mask.
94 addr uintptr
// mask is a bitmask where each bit corresponds to pointer-words after
// addr. Bit 0 is the pointer-word at addr, bit 1 is the next word, and
// so on. If a bit is 1, there is a pointer at that word.
// nextFast and next clear bits in this mask as pointers are produced.
100 mask uintptr
// typ is a pointer to the type information for the heap object's type.
// It is nil if the object is in a span where heapBitsInSpan(elemsize)
// is true.
104 typ *_type
105 }
// typePointersOf returns an iterator over all heap pointers in the
// range [addr, addr+size).
//
// addr and addr+size must fall within a single object in span, and size
// must be pointer-aligned.
//
// addr+size must be passed as the limit argument to the iterator's next
// method on each iteration. This slightly awkward API is to allow
// typePointers to be destructured by callers, which is important for
// the scan loop's performance.
118 func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
119 base := span.objBase(addr)
120 tp := span.typePointersOfUnchecked(base)
121 if base == addr && size == span.elemsize {
122 return tp
123 }
124 return tp.fastForward(addr-tp.addr, addr+size)
125 }
// typePointersOfUnchecked is like typePointersOf, but assumes addr is
// the base of an allocation slot in span (the start of the object if
// there is no header, the header otherwise). It returns an iterator
// that generates all pointers in the range [addr, addr+span.elemsize).
135 func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
136 const doubleCheck = false
137 if doubleCheck && span.objBase(addr) != addr {
138 print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
139 throw("typePointersOfUnchecked consisting of non-base-address for object")
140 }
141
142 spc := span.spanclass
143 if spc.noscan() {
144 return typePointers{}
145 }
146 if heapBitsInSpan(span.elemsize) {
147
148 return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
149 }
150
151
152 var typ *_type
153 if spc.sizeclass() != 0 {
154
155 typ = *(**_type)(unsafe.Pointer(addr))
156 addr += gc.MallocHeaderSize
157 } else {
158
159
160 typ = (*_type)(atomic.Loadp(unsafe.Pointer(&span.largeType)))
161 if typ == nil {
162
163 return typePointers{}
164 }
165 }
166 gcmask := getGCMask(typ)
167 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
168 }
// typePointersOfType is like typePointersOf, but assumes addr points to
// one or more contiguous instances of the provided type. The provided
// type must not be nil.
//
// It returns an iterator that tiles typ's pointer mask starting from
// addr. It is the caller's responsibility to limit iteration.
179 func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
180 const doubleCheck = false
181 if doubleCheck && typ == nil {
182 throw("bad type passed to typePointersOfType")
183 }
184 if span.spanclass.noscan() {
185 return typePointers{}
186 }
187
188 gcmask := getGCMask(typ)
189 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
190 }
// nextFast is the fast path of next. It processes only the bits already
// cached in tp.mask: it returns the address of the next pointer-word
// described by the mask and clears that bit, or returns an address of 0
// if the mask is empty, in which case the caller must fall back to next
// to load more bits (or to stop).
//
// nextFast is kept small so that it inlines into its callers; next and
// the scan loops call it on every iteration.
212 func (tp typePointers) nextFast() (typePointers, uintptr) {
213
214 if tp.mask == 0 {
215 return tp, 0
216 }
217
218 var i int
219 if goarch.PtrSize == 8 {
220 i = sys.TrailingZeros64(uint64(tp.mask))
221 } else {
222 i = sys.TrailingZeros32(uint32(tp.mask))
223 }
224 if GOARCH == "amd64" {
225
226 tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
227 } else {
228
229 tp.mask &= tp.mask - 1
230 }
231
232 return tp, tp.addr + uintptr(i)*goarch.PtrSize
233 }
// next advances the pointers iterator, returning the updated iterator
// and the address of the next pointer, or 0 if there are no more
// pointers before limit.
//
// limit must be the same each time it is passed to next.
243 func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
244 for {
245 if tp.mask != 0 {
246 return tp.nextFast()
247 }
248
249
250 if tp.typ == nil {
251 return typePointers{}, 0
252 }
253
254
255 if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
256 tp.elem += tp.typ.Size_
257 tp.addr = tp.elem
258 } else {
259 tp.addr += ptrBits * goarch.PtrSize
260 }
261
262
263 if tp.addr >= limit {
264 return typePointers{}, 0
265 }
266
267
268 tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
269 if tp.addr+goarch.PtrSize*ptrBits > limit {
270 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
271 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
272 }
273 }
274 }
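// A typical use of the iterator (a sketch mirroring the loops in
// bulkBarrierPreWrite below; addr is assumed to be an object base in
// span and limit its end):
//
//	tp := span.typePointersOfUnchecked(addr)
//	for {
//		var p uintptr
//		if tp, p = tp.next(limit); p == 0 {
//			break
//		}
//		// p is the address of a pointer-word within the object.
//	}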

// fastForward moves the iterator forward by n bytes. n must be a
// multiple of goarch.PtrSize. limit is the exclusive upper bound on
// addresses the iterator will produce: pointer bits before the new
// position and at or beyond limit are masked out.
283 func (tp typePointers) fastForward(n, limit uintptr) typePointers {
284
285 target := tp.addr + n
286 if target >= limit {
287 return typePointers{}
288 }
289 if tp.typ == nil {
290
291
292 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
293
294 if tp.addr+goarch.PtrSize*ptrBits > limit {
295 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
296 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
297 }
298 return tp
299 }
300
301
302
303 if n >= tp.typ.Size_ {
304
305
306 oldelem := tp.elem
307 tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
308 tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
309 } else {
310 tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
311 }
312
313 if tp.addr-tp.elem >= tp.typ.PtrBytes {
314
315
316 tp.elem += tp.typ.Size_
317 tp.addr = tp.elem
318 tp.mask = readUintptr(getGCMask(tp.typ))
319
320
321 if tp.addr >= limit {
322 return typePointers{}
323 }
324 } else {
325
326
327 tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
328 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
329 }
330 if tp.addr+goarch.PtrSize*ptrBits > limit {
331 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
332 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
333 }
334 return tp
335 }
// objBase returns the base pointer for the object containing addr in span.
//
// Assumes that addr points into a valid part of span
// (span.base() <= addr < span.limit).
342 func (span *mspan) objBase(addr uintptr) uintptr {
343 return span.base() + span.objIndex(addr)*span.elemsize
344 }
// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [dst, dst+size),
// using pointer/scalar information from [src, src+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size); the GC must not observe the
// memmove without the barriers having executed.
//
// Pointer data is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.PtrBytes.
//
// The typ argument is the type of the space at src and dst (and the
// element type if src and dst refer to arrays), and it is optional.
// If typ is nil, the barrier will still behave as expected and typ
// is used purely as an optimization.
//
//go:nosplit
388 func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
389 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
390 throw("bulkBarrierPreWrite: unaligned arguments")
391 }
392 if !writeBarrier.enabled {
393 return
394 }
395 s := spanOf(dst)
396 if s == nil {
397
398
399 for _, datap := range activeModules() {
400 if datap.data <= dst && dst < datap.edata {
401 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
402 return
403 }
404 }
405 for _, datap := range activeModules() {
406 if datap.bss <= dst && dst < datap.ebss {
407 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
408 return
409 }
410 }
411 return
412 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
// dst was heap memory at some point, but isn't now (for example, it
// is part of a stack span). No new pointers are being exposed to the
// GC here, so there is nothing to do.
419 return
420 }
421 buf := &getg().m.p.ptr().wbBuf
422
423
424 const doubleCheck = false
425 if doubleCheck {
426 doubleCheckTypePointersOfType(s, typ, dst, size)
427 }
428
429 var tp typePointers
430 if typ != nil {
431 tp = s.typePointersOfType(typ, dst)
432 } else {
433 tp = s.typePointersOf(dst, size)
434 }
435 if src == 0 {
436 for {
437 var addr uintptr
438 if tp, addr = tp.next(dst + size); addr == 0 {
439 break
440 }
441 dstx := (*uintptr)(unsafe.Pointer(addr))
442 p := buf.get1()
443 p[0] = *dstx
444 }
445 } else {
446 for {
447 var addr uintptr
448 if tp, addr = tp.next(dst + size); addr == 0 {
449 break
450 }
451 dstx := (*uintptr)(unsafe.Pointer(addr))
452 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
453 p := buf.get2()
454 p[0] = *dstx
455 p[1] = *srcx
456 }
457 }
458 }
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite,
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
// The type of the space can be provided purely as an optimization.
// See bulkBarrierPreWrite's comment for details.
//
//go:nosplit
474 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
475 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
476 throw("bulkBarrierPreWrite: unaligned arguments")
477 }
478 if !writeBarrier.enabled {
479 return
480 }
481 buf := &getg().m.p.ptr().wbBuf
482 s := spanOf(dst)
483
484
485 const doubleCheck = false
486 if doubleCheck {
487 doubleCheckTypePointersOfType(s, typ, dst, size)
488 }
489
490 var tp typePointers
491 if typ != nil {
492 tp = s.typePointersOfType(typ, dst)
493 } else {
494 tp = s.typePointersOf(dst, size)
495 }
496 for {
497 var addr uintptr
498 if tp, addr = tp.next(dst + size); addr == 0 {
499 break
500 }
501 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
502 p := buf.get1()
503 p[0] = *srcx
504 }
505 }
506
// initHeapBits initializes the heap bitmap for a span.
508 func (s *mspan) initHeapBits() {
509 if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
510 b := s.heapBits()
511 for i := range b {
512 b[i] = ^uintptr(0)
513 }
514 } else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
515 b := s.heapBits()
516 clear(b)
517 }
518 if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(s.elemsize) {
519 s.initInlineMarkBits()
520 }
521 }
// heapBits returns the heap ptr/scalar bits stored at the end of the
// span for small object spans and heap arena spans.
537 func (span *mspan) heapBits() []uintptr {
538 const doubleCheck = false
539
540 if doubleCheck && !span.isUserArenaChunk {
541 if span.spanclass.noscan() {
542 throw("heapBits called for noscan")
543 }
544 if span.elemsize > gc.MinSizeForMallocHeader {
545 throw("heapBits called for span class that should have a malloc header")
546 }
547 }
// Find the bitmap at the end of the span.
//
// Nearly every span with heap bits is exactly one page in size.
551 if span.npages == 1 {
552
553 return heapBitsSlice(span.base(), pageSize, span.elemsize)
554 }
555 return heapBitsSlice(span.base(), span.npages*pageSize, span.elemsize)
556 }
// heapBitsSlice computes the heap bits slice for a span with the given
// base address, size in bytes, and element size.
561 func heapBitsSlice(spanBase, spanSize, elemsize uintptr) []uintptr {
562 base, bitmapSize := spanHeapBitsRange(spanBase, spanSize, elemsize)
563 elems := int(bitmapSize / goarch.PtrSize)
564 var sl notInHeapSlice
565 sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(base)), elems, elems}
566 return *(*[]uintptr)(unsafe.Pointer(&sl))
567 }
// spanHeapBitsRange returns the base address and length, in bytes, of
// the heap bits stored at the end of a span with the given base, size,
// and element size.
570 func spanHeapBitsRange(spanBase, spanSize, elemsize uintptr) (base, size uintptr) {
571 size = spanSize / goarch.PtrSize / 8
572 base = spanBase + spanSize - size
573 if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(elemsize) {
574 base -= unsafe.Sizeof(spanInlineMarkBits{})
575 }
576 return
577 }
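// For example, for a one-page (8 KiB) span with 8-byte pointer words
// (and ignoring the Green Tea inline mark bits), size is 8192/8/8 = 128
// bytes and the bitmap occupies the last 128 bytes of the span.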

// heapBitsSmallForAddr loads the heap bits for the object stored at
// addr from span.heapBits.
//
// addr must be the base pointer of an object in the span.
// heapBitsInSpan(span.elemsize) must be true.
585 func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
586 hbitsBase, _ := spanHeapBitsRange(span.base(), span.npages*pageSize, span.elemsize)
587 hbits := (*byte)(unsafe.Pointer(hbitsBase))
// The heap bits for an object here always fit in at most two adjacent
// words of the span's bitmap (mirroring writeHeapBitsSmall below), so
// compute the word index i and bit offset j of the object's first
// pointer-word and read just word0 and word1 rather than going through
// heapBits().
597 i := (addr - span.base()) / goarch.PtrSize / ptrBits
598 j := (addr - span.base()) / goarch.PtrSize % ptrBits
599 bits := span.elemsize / goarch.PtrSize
600 word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
601 word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
602
603 var read uintptr
604 if j+bits > ptrBits {
605
606 bits0 := ptrBits - j
607 bits1 := bits - bits0
608 read = *word0 >> j
609 read |= (*word1 & ((1 << bits1) - 1)) << bits0
610 } else {
611
612 read = (*word0 >> j) & ((1 << bits) - 1)
613 }
614 return read
615 }
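// For example (illustrative values): with 8-byte pointer words and
// ptrBits = 64, an object starting 144 bytes past span.base() begins at
// pointer-word 18, so i = 0 and j = 18; if the object is 48 bytes
// (bits = 6), all of its bits come from word0:
// read = (*word0 >> 18) & 0x3f.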

// writeHeapBitsSmall writes the heap bits for small objects whose
// ptr/scalar data is stored in the span's heapBits.
//
// x must be a pointer into the span. heapBitsInSpan(dataSize) and
// heapBitsInSpan(span.elemsize) must both be true. It returns the
// number of bytes of the object that need scanning.
624 func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
625
626 src0 := readUintptr(getGCMask(typ))
627
628
629 scanSize = typ.PtrBytes
630 src := src0
631 if typ.Size_ == goarch.PtrSize {
632 src = (1 << (dataSize / goarch.PtrSize)) - 1
633 } else {
634
635
636
637 if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
638 throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
639 }
640 for i := typ.Size_; i < dataSize; i += typ.Size_ {
641 src |= src0 << (i / goarch.PtrSize)
642 scanSize += typ.Size_
643 }
644 if asanenabled {
645
646
647 src &= (1 << (dataSize / goarch.PtrSize)) - 1
648 }
649 }
650
651
652
653 dstBase, _ := spanHeapBitsRange(span.base(), pageSize, span.elemsize)
654 dst := unsafe.Pointer(dstBase)
655 o := (x - span.base()) / goarch.PtrSize
656 i := o / ptrBits
657 j := o % ptrBits
658 bits := span.elemsize / goarch.PtrSize
659 if j+bits > ptrBits {
660
661 bits0 := ptrBits - j
662 bits1 := bits - bits0
663 dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
664 dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
665 *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
666 *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
667 } else {
668
669 dst := (*uintptr)(add(dst, i*goarch.PtrSize))
670 *dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j)
671 }
672
673 const doubleCheck = false
674 if doubleCheck {
675 srcRead := span.heapBitsSmallForAddr(x)
676 if srcRead != src {
677 print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
678 print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
679 print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
680 throw("bad pointer bits written for small object")
681 }
682 }
683 return
684 }
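// For example (illustrative values): a 16-byte type whose first word is
// a pointer has src0 = 0b01. For dataSize = 48 the replication loop ors
// in copies at word offsets 2 and 4, giving src = 0b10101, which is
// then written into the span's heap bits at the object's bit offset.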

// heapSetType* functions record that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ. (The number
// of values is given by dataSize / typ.Size.) It is known that the type
// has pointers somewhere in the allocation; otherwise malloc does not
// record any type information at all.
//
// Each variant handles one metadata representation: no malloc header
// (heap bits in the span), a small-object malloc header, or the
// span-level type of a large object. Each returns the number of bytes
// of the allocation that the garbage collector must scan.
703 const doubleCheckHeapSetType = doubleCheckMalloc
704
705 func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
706 if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
707 throw("tried to write heap bits, but no heap bits in span")
708 }
709 scanSize := span.writeHeapBitsSmall(x, dataSize, typ)
710 if doubleCheckHeapSetType {
711 doubleCheckHeapType(x, dataSize, typ, nil, span)
712 }
713 return scanSize
714 }
715
716 func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr {
717 if header == nil {
735 throw("runtime: pointer to heap type header nil?")
736 }
737 *header = typ
738 if doubleCheckHeapSetType {
739 doubleCheckHeapType(x, dataSize, typ, header, span)
740 }
741 return span.elemsize
742 }
743
744 func heapSetTypeLarge(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
745 gctyp := typ
// Publish the type to the span. typePointersOfUnchecked reads
// span.largeType with atomic.Loadp and treats nil as "type not yet
// published", so the store must be atomic as well.
795 atomic.StorepNoWB(unsafe.Pointer(&span.largeType), unsafe.Pointer(gctyp))
796 if doubleCheckHeapSetType {
797 doubleCheckHeapType(x, dataSize, typ, &span.largeType, span)
798 }
799 return span.elemsize
800 }
801
802 func doubleCheckHeapType(x, dataSize uintptr, gctyp *_type, header **_type, span *mspan) {
803 doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
804
805
806
807
808 maxIterBytes := span.elemsize
809 if header == nil {
810 maxIterBytes = dataSize
811 }
812 off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
813 size := dataSize - off
814 if size == 0 {
815 off -= goarch.PtrSize
816 size += goarch.PtrSize
817 }
818 interior := x + off
819 size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
820 if size == 0 {
821 size = goarch.PtrSize
822 }
823
824 size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
825 if interior+size > x+maxIterBytes {
826 size = x + maxIterBytes - interior
827 }
828 doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
829 }
830
831 func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
832
833 tp := span.typePointersOfUnchecked(span.objBase(x))
834 maxIterBytes := span.elemsize
835 if header == nil {
836 maxIterBytes = dataSize
837 }
838 bad := false
839 for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
840
841 want := false
842 if i < span.elemsize {
843 off := i % typ.Size_
844 if off < typ.PtrBytes {
845 j := off / goarch.PtrSize
846 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
847 }
848 }
849 if want {
850 var addr uintptr
851 tp, addr = tp.next(x + span.elemsize)
852 if addr == 0 {
853 println("runtime: found bad iterator")
854 }
855 if addr != x+i {
856 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
857 bad = true
858 }
859 }
860 }
861 if !bad {
862 var addr uintptr
863 tp, addr = tp.next(x + span.elemsize)
864 if addr == 0 {
865 return
866 }
867 println("runtime: extra pointer:", hex(addr))
868 }
869 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " TFlagGCMaskOnDemand=", typ.TFlag&abi.TFlagGCMaskOnDemand != 0, "\n")
870 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
871 print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
872 print("runtime: limit=", hex(x+span.elemsize), "\n")
873 tp = span.typePointersOfUnchecked(x)
874 dumpTypePointers(tp)
875 for {
876 var addr uintptr
877 if tp, addr = tp.next(x + span.elemsize); addr == 0 {
878 println("runtime: would've stopped here")
879 dumpTypePointers(tp)
880 break
881 }
882 print("runtime: addr=", hex(addr), "\n")
883 dumpTypePointers(tp)
884 }
885 throw("heapSetType: pointer entry not correct")
886 }
887
888 func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
889 bad := false
890 if interior < x {
891 print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
892 throw("found bad interior pointer")
893 }
894 off := interior - x
895 tp := span.typePointersOf(interior, size)
896 for i := off; i < off+size; i += goarch.PtrSize {
897
898 want := false
899 if i < span.elemsize {
900 off := i % typ.Size_
901 if off < typ.PtrBytes {
902 j := off / goarch.PtrSize
903 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
904 }
905 }
906 if want {
907 var addr uintptr
908 tp, addr = tp.next(interior + size)
909 if addr == 0 {
910 println("runtime: found bad iterator")
911 bad = true
912 }
913 if addr != x+i {
914 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
915 bad = true
916 }
917 }
918 }
919 if !bad {
920 var addr uintptr
921 tp, addr = tp.next(interior + size)
922 if addr == 0 {
923 return
924 }
925 println("runtime: extra pointer:", hex(addr))
926 }
927 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
928 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
929 print("runtime: limit=", hex(interior+size), "\n")
930 tp = span.typePointersOf(interior, size)
931 dumpTypePointers(tp)
932 for {
933 var addr uintptr
934 if tp, addr = tp.next(interior + size); addr == 0 {
935 println("runtime: would've stopped here")
936 dumpTypePointers(tp)
937 break
938 }
939 print("runtime: addr=", hex(addr), "\n")
940 dumpTypePointers(tp)
941 }
942
943 print("runtime: want: ")
944 for i := off; i < off+size; i += goarch.PtrSize {
945
946 want := false
947 if i < dataSize {
948 off := i % typ.Size_
949 if off < typ.PtrBytes {
950 j := off / goarch.PtrSize
951 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
952 }
953 }
954 if want {
955 print("1")
956 } else {
957 print("0")
958 }
959 }
960 println()
961
962 throw("heapSetType: pointer entry not correct")
963 }
964
965
966 func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
967 if typ == nil {
968 return
969 }
970 if typ.Kind() == abi.Interface {
971
972
973
974 return
975 }
976 tp0 := s.typePointersOfType(typ, addr)
977 tp1 := s.typePointersOf(addr, size)
978 failed := false
979 for {
980 var addr0, addr1 uintptr
981 tp0, addr0 = tp0.next(addr + size)
982 tp1, addr1 = tp1.next(addr + size)
983 if addr0 != addr1 {
984 failed = true
985 break
986 }
987 if addr0 == 0 {
988 break
989 }
990 }
991 if failed {
992 tp0 := s.typePointersOfType(typ, addr)
993 tp1 := s.typePointersOf(addr, size)
994 print("runtime: addr=", hex(addr), " size=", size, "\n")
995 print("runtime: type=", toRType(typ).string(), "\n")
996 dumpTypePointers(tp0)
997 dumpTypePointers(tp1)
998 for {
999 var addr0, addr1 uintptr
1000 tp0, addr0 = tp0.next(addr + size)
1001 tp1, addr1 = tp1.next(addr + size)
1002 print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
1003 if addr0 == 0 && addr1 == 0 {
1004 break
1005 }
1006 }
1007 throw("mismatch between typePointersOfType and typePointersOf")
1008 }
1009 }
1010
1011 func dumpTypePointers(tp typePointers) {
1012 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
1013 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
1014 for i := uintptr(0); i < ptrBits; i++ {
1015 if tp.mask&(uintptr(1)<<i) != 0 {
1016 print("1")
1017 } else {
1018 print("0")
1019 }
1020 }
1021 println()
1022 }
// addb returns the byte pointer p+n.
//
//go:nowritebarrier
//go:nosplit
1028 func addb(p *byte, n uintptr) *byte {
1029
1030
1031
1032 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
1033 }
// subtractb returns the byte pointer p-n.
//
//go:nowritebarrier
//go:nosplit
1039 func subtractb(p *byte, n uintptr) *byte {
1040
1041
1042
1043 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
1044 }
// add1 returns the byte pointer p+1.
//
//go:nowritebarrier
//go:nosplit
1050 func add1(p *byte) *byte {
1051
1052
1053
1054 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
1055 }
// subtract1 returns the byte pointer p-1.
//
//go:nowritebarrier
//go:nosplit
1063 func subtract1(p *byte) *byte {
1064
1065
1066
1067 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
1068 }
// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte mask with a single bit set that selects the mark bit.
// index is the object's index within its span.
1079 type markBits struct {
1080 bytep *uint8
1081 mask uint8
1082 index uintptr
1083 }
// allocBitsForIndex returns a markBits addressing the allocation bit of
// the object at allocBitIndex in span s.
1086 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
1087 bytep, mask := s.allocBits.bitp(allocBitIndex)
1088 return markBits{bytep, mask, allocBitIndex}
1089 }
// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions can
// be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
1095 func (s *mspan) refillAllocCache(whichByte uint16) {
1096 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
1097 aCache := uint64(0)
1098 aCache |= uint64(bytes[0])
1099 aCache |= uint64(bytes[1]) << (1 * 8)
1100 aCache |= uint64(bytes[2]) << (2 * 8)
1101 aCache |= uint64(bytes[3]) << (3 * 8)
1102 aCache |= uint64(bytes[4]) << (4 * 8)
1103 aCache |= uint64(bytes[5]) << (5 * 8)
1104 aCache |= uint64(bytes[6]) << (6 * 8)
1105 aCache |= uint64(bytes[7]) << (7 * 8)
1106 s.allocCache = ^aCache
1107 }
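// For example (illustrative values): if the 8 bytes loaded from
// s.allocBits begin with 0b00000101 (objects 0 and 2 allocated), the
// cached value is their complement, so sys.TrailingZeros64(s.allocCache)
// is 1: the first free object index relative to whichByte*8.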

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
1113 func (s *mspan) nextFreeIndex() uint16 {
1114 sfreeindex := s.freeindex
1115 snelems := s.nelems
1116 if sfreeindex == snelems {
1117 return sfreeindex
1118 }
1119 if sfreeindex > snelems {
1120 throw("s.freeindex > s.nelems")
1121 }
1122
1123 aCache := s.allocCache
1124
1125 bitIndex := sys.TrailingZeros64(aCache)
1126 for bitIndex == 64 {
1127
1128 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
1129 if sfreeindex >= snelems {
1130 s.freeindex = snelems
1131 return snelems
1132 }
1133 whichByte := sfreeindex / 8
1134
1135 s.refillAllocCache(whichByte)
1136 aCache = s.allocCache
1137 bitIndex = sys.TrailingZeros64(aCache)
1138
1139
1140 }
1141 result := sfreeindex + uint16(bitIndex)
1142 if result >= snelems {
1143 s.freeindex = snelems
1144 return snelems
1145 }
1146
1147 s.allocCache >>= uint(bitIndex + 1)
1148 sfreeindex = result + 1
1149
1150 if sfreeindex%64 == 0 && sfreeindex != snelems {
1151
1152
1153
1154
1155
1156 whichByte := sfreeindex / 8
1157 s.refillAllocCache(whichByte)
1158 }
1159 s.freeindex = sfreeindex
1160 return result
1161 }
// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
1172 func (s *mspan) isFree(index uintptr) bool {
1173 if index < uintptr(s.freeindex) {
1174 return false
1175 }
1176 bytep, mask := s.allocBits.bitp(index)
1177 return *bytep&mask == 0
1178 }
// isFreeOrNewlyAllocated reports whether the index'th object in s is
// unallocated, or was allocated so recently that it is not yet covered
// by s.freeIndexForScan and therefore should not be scanned.
//
// The same caller requirements as for isFree apply.
1193 func (s *mspan) isFreeOrNewlyAllocated(index uintptr) bool {
1194 if index < uintptr(s.freeIndexForScan) {
1195 return false
1196 }
1197 bytep, mask := s.allocBits.bitp(index)
1198 return *bytep&mask == 0
1199 }
// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*pageSize),
// or may be exactly s.npages*pageSize
// if s.elemsize is from sizeclasses.go.
//
// nosplit, because it is called by objIndex, which is nosplit.
//
//go:nosplit
1209 func (s *mspan) divideByElemSize(n uintptr) uintptr {
1210 const doubleCheck = false
1211
1212
1213 q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
1214
1215 if doubleCheck && q != n/s.elemsize {
1216 println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
1217 throw("bad magic division")
1218 }
1219 return q
1220 }
// objIndex returns the index of the object containing the pointer p
// within span s.
//
// nosplit, because it is called by other nosplit code like findObject.
//
//go:nosplit
1225 func (s *mspan) objIndex(p uintptr) uintptr {
1226 return s.divideByElemSize(p - s.base())
1227 }
1228
1229 func markBitsForAddr(p uintptr) markBits {
1230 s := spanOf(p)
1231 objIndex := s.objIndex(p)
1232 return s.markBitsForIndex(objIndex)
1233 }
1234
1235
1236 func (m markBits) isMarked() bool {
1237 return *m.bytep&m.mask != 0
1238 }
1239
1240
1241 func (m markBits) setMarked() {
1242
1243
1244
1245 atomic.Or8(m.bytep, m.mask)
1246 }
1247
1248
1249 func (m markBits) setMarkedNonAtomic() {
1250 *m.bytep |= m.mask
1251 }
1252
1253
1254 func (m markBits) clearMarked() {
1255
1256
1257
1258 atomic.And8(m.bytep, ^m.mask)
1259 }
1260
1261
1262 func markBitsForSpan(base uintptr) (mbits markBits) {
1263 mbits = markBitsForAddr(base)
1264 if mbits.mask != 1 {
1265 throw("markBitsForSpan: unaligned start")
1266 }
1267 return mbits
1268 }
1269
1270
1271
1272
1273 func isMarkedOrNotInHeap(p unsafe.Pointer) bool {
1274 obj, span, objIndex := findObject(uintptr(p), 0, 0)
1275 if obj != 0 {
1276 mbits := span.markBitsForIndex(objIndex)
1277 return mbits.isMarked()
1278 }
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289 return true
1290 }
1291
1292
1293 func (m *markBits) advance() {
1294 if m.mask == 1<<7 {
1295 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
1296 m.mask = 1
1297 } else {
1298 m.mask = m.mask << 1
1299 }
1300 m.index++
1301 }
1302
// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when the -clobberdead flag is set.
1305 const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
// badPointer reports a bad pointer found in the heap and throws.
1308 func badPointer(s *mspan, p, refBase, refOff uintptr) {
1309
1310
1311
1312
1313
1314
1315
1316
1317 printlock()
1318 print("runtime: pointer ", hex(p))
1319 if s != nil {
1320 state := s.state.get()
1321 if state != mSpanInUse {
1322 print(" to unallocated span")
1323 } else {
1324 print(" to unused region of span")
1325 }
1326 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
1327 }
1328 print("\n")
1329 if refBase != 0 {
1330 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
1331 gcDumpObject("object", refBase, refOff)
1332 }
1333 getg().m.traceback = 2
1334 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
1335 }
// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p points to an invalid heap pointer and debug.invalidptr != 0,
// findObject panics instead of returning.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current
// goroutine's stack. Since p is a uintptr, it would not be adjusted
// if the stack were to move.
//
//go:nosplit
1361 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
1362 s = spanOf(p)
1363
1364
1365 if s == nil {
1366 if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
1367
1368
1369
1370 badPointer(s, p, refBase, refOff)
1371 }
1372 return
1373 }
1374
1375
1376
1377
1378 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
1379
1380 if state == mSpanManual {
1381 return
1382 }
1383
1384
1385 if debug.invalidptr != 0 {
1386 badPointer(s, p, refBase, refOff)
1387 }
1388 return
1389 }
1390
1391 objIndex = s.objIndex(p)
1392 base = s.base() + objIndex*s.elemsize
1393 return
1394 }
// reflect_verifyNotInHeapPtr reports whether converting the value p to
// an unsafe.Pointer is valid for a not-in-heap type.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
1399 func reflect_verifyNotInHeapPtr(p uintptr) bool {
// Conversion to a pointer is ok as long as findObject above does not
// call badPointer. Since we're already promised that p doesn't point
// into the heap, just disallow heap pointers and the special clobbered
// pointer.
1403 return spanOf(p) == nil && p != clobberdeadPtr
1404 }
1405
1406 const ptrBits = 8 * goarch.PtrSize
1407
// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
1416 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
1417 word := maskOffset / goarch.PtrSize
1418 bits = addb(bits, word/8)
1419 mask := uint8(1) << (word % 8)
1420
1421 buf := &getg().m.p.ptr().wbBuf
1422 for i := uintptr(0); i < size; i += goarch.PtrSize {
1423 if mask == 0 {
1424 bits = addb(bits, 1)
1425 if *bits == 0 {
1426
1427 i += 7 * goarch.PtrSize
1428 continue
1429 }
1430 mask = 1
1431 }
1432 if *bits&mask != 0 {
1433 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1434 if src == 0 {
1435 p := buf.get1()
1436 p[0] = *dstx
1437 } else {
1438 srcx := (*uintptr)(unsafe.Pointer(src + i))
1439 p := buf.get2()
1440 p[0] = *dstx
1441 p[1] = *srcx
1442 }
1443 }
1444 mask <<= 1
1445 }
1446 }
// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove, using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and
// [dst, dst+size). dst, src, and size must be pointer-aligned.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
//go:nosplit
1462 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
1463 if typ == nil {
1464 throw("runtime: typeBitsBulkBarrier without type")
1465 }
1466 if typ.Size_ != size {
1467 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
1468 throw("runtime: invalid typeBitsBulkBarrier")
1469 }
1470 if !writeBarrier.enabled {
1471 return
1472 }
1473 ptrmask := getGCMask(typ)
1474 buf := &getg().m.p.ptr().wbBuf
1475 var bits uint32
1476 for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
1477 if i&(goarch.PtrSize*8-1) == 0 {
1478 bits = uint32(*ptrmask)
1479 ptrmask = addb(ptrmask, 1)
1480 } else {
1481 bits = bits >> 1
1482 }
1483 if bits&1 != 0 {
1484 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1485 srcx := (*uintptr)(unsafe.Pointer(src + i))
1486 p := buf.get2()
1487 p[0] = *dstx
1488 p[1] = *srcx
1489 }
1490 }
1491 }
// countAlloc returns the number of objects allocated in span s by
// counting bits set in the mark bitmap. These bits are set by the GC
// during the mark phase, so the count is only meaningful after marking.
1495 func (s *mspan) countAlloc() int {
1496 count := 0
1497 bytes := divRoundUp(uintptr(s.nelems), 8)
1498
1499
1500
1501
1502 for i := uintptr(0); i < bytes; i += 8 {
1503
1504
1505
1506
1507 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
1508 count += sys.OnesCount64(mrkBits)
1509 }
1510 return count
1511 }
// readUintptr reads the pointer-sized bitmap word at p, byte-swapping
// on big-endian platforms so that bit 0 of the result always refers to
// the first (lowest-addressed) entry of the bitmap.
1515 func readUintptr(p *byte) uintptr {
1516 x := *(*uintptr)(unsafe.Pointer(p))
1517 if goarch.BigEndian {
1518 if goarch.PtrSize == 8 {
1519 return uintptr(sys.Bswap64(uint64(x)))
1520 }
1521 return uintptr(sys.Bswap32(uint32(x)))
1522 }
1523 return x
1524 }
1525
1526 var debugPtrmask struct {
1527 lock mutex
1528 data *byte
1529 }
// progToPointerMask returns the 1-bit pointer mask output by the GC
// program prog. size is the size of the region described by prog,
// in bytes.
1534 func progToPointerMask(prog *byte, size uintptr) bitvector {
1535 n := (size/goarch.PtrSize + 7) / 8
1536 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1537 x[len(x)-1] = 0xa1
1538 n = runGCProg(prog, &x[0])
1539 if x[len(x)-1] != 0xa1 {
1540 throw("progToPointerMask: overflow")
1541 }
1542 return bitvector{int32(n), &x[0]}
1543 }
// GC programs
//
// A GC program is a compact encoding of a 1-bit pointer mask,
// interpreted by runGCProg. A program is a sequence of instructions,
// each beginning with an opcode byte:
//
//   - If the opcode's top bit is clear, its low 7 bits give a literal
//     count n: the following ceil(n/8) bytes supply n mask bits to
//     emit, least-significant bit first. A count of zero ends the
//     program.
//   - If the opcode's top bit is set, the instruction repeats bits that
//     were already emitted. The low 7 bits give the length of the
//     repeated group (taken from a following uvarint if they are zero),
//     and a uvarint repeat count follows: the last n bits are emitted
//     again that many more times.
//
// runGCProg executes the program prog, writing the resulting mask to
// dst, and returns the number of bits written.
1563 func runGCProg(prog, dst *byte) uintptr {
1564 dstStart := dst
1565
1566
1567 var bits uintptr
1568 var nbits uintptr
1569
1570 p := prog
1571 Run:
1572 for {
1573
1574
1575 for ; nbits >= 8; nbits -= 8 {
1576 *dst = uint8(bits)
1577 dst = add1(dst)
1578 bits >>= 8
1579 }
1580
1581
1582 inst := uintptr(*p)
1583 p = add1(p)
1584 n := inst & 0x7F
1585 if inst&0x80 == 0 {
1586
1587 if n == 0 {
1588
1589 break Run
1590 }
1591 nbyte := n / 8
1592 for i := uintptr(0); i < nbyte; i++ {
1593 bits |= uintptr(*p) << nbits
1594 p = add1(p)
1595 *dst = uint8(bits)
1596 dst = add1(dst)
1597 bits >>= 8
1598 }
1599 if n %= 8; n > 0 {
1600 bits |= uintptr(*p) << nbits
1601 p = add1(p)
1602 nbits += n
1603 }
1604 continue Run
1605 }
1606
1607
1608 if n == 0 {
1609 for off := uint(0); ; off += 7 {
1610 x := uintptr(*p)
1611 p = add1(p)
1612 n |= (x & 0x7F) << off
1613 if x&0x80 == 0 {
1614 break
1615 }
1616 }
1617 }
1618
1619
1620 c := uintptr(0)
1621 for off := uint(0); ; off += 7 {
1622 x := uintptr(*p)
1623 p = add1(p)
1624 c |= (x & 0x7F) << off
1625 if x&0x80 == 0 {
1626 break
1627 }
1628 }
1629 c *= n
1630
1631
1632
1633
1634
1635
1636
1637
1638 src := dst
1639 const maxBits = goarch.PtrSize*8 - 7
1640 if n <= maxBits {
1641
1642 pattern := bits
1643 npattern := nbits
1644
1645
1646 src = subtract1(src)
1647 for npattern < n {
1648 pattern <<= 8
1649 pattern |= uintptr(*src)
1650 src = subtract1(src)
1651 npattern += 8
1652 }
1653
1654
1655
1656
1657
1658 if npattern > n {
1659 pattern >>= npattern - n
1660 npattern = n
1661 }
1662
1663
1664 if npattern == 1 {
1665
1666
1667
1668
1669
1670
1671 if pattern == 1 {
1672 pattern = 1<<maxBits - 1
1673 npattern = maxBits
1674 } else {
1675 npattern = c
1676 }
1677 } else {
1678 b := pattern
1679 nb := npattern
1680 if nb+nb <= maxBits {
1681
1682 for nb <= goarch.PtrSize*8 {
1683 b |= b << nb
1684 nb += nb
1685 }
1686
1687
1688 nb = maxBits / npattern * npattern
1689 b &= 1<<nb - 1
1690 pattern = b
1691 npattern = nb
1692 }
1693 }
1694
1695
1696
1697
1698 for ; c >= npattern; c -= npattern {
1699 bits |= pattern << nbits
1700 nbits += npattern
1701 for nbits >= 8 {
1702 *dst = uint8(bits)
1703 dst = add1(dst)
1704 bits >>= 8
1705 nbits -= 8
1706 }
1707 }
1708
1709
1710 if c > 0 {
1711 pattern &= 1<<c - 1
1712 bits |= pattern << nbits
1713 nbits += c
1714 }
1715 continue Run
1716 }
1717
1718
1719
1720
1721 off := n - nbits
1722
1723 src = subtractb(src, (off+7)/8)
1724 if frag := off & 7; frag != 0 {
1725 bits |= uintptr(*src) >> (8 - frag) << nbits
1726 src = add1(src)
1727 nbits += frag
1728 c -= frag
1729 }
1730
1731
1732 for i := c / 8; i > 0; i-- {
1733 bits |= uintptr(*src) << nbits
1734 src = add1(src)
1735 *dst = uint8(bits)
1736 dst = add1(dst)
1737 bits >>= 8
1738 }
1739
1740 if c %= 8; c > 0 {
1741 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1742 nbits += c
1743 }
1744 }
1745
1746
1747 totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1748 nbits += -nbits & 7
1749 for ; nbits > 0; nbits -= 8 {
1750 *dst = uint8(bits)
1751 dst = add1(dst)
1752 bits >>= 8
1753 }
1754 return totalBits
1755 }
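// For example (illustrative bytes, not taken from a real type): the
// program {0x02, 0x01, 0x81, 0x02, 0x00} emits two literal bits 1, 0
// (pointer, then scalar), repeats the last 1 bit (the 0) two more
// times, and ends, producing the 4-bit mask 1, 0, 0, 0.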
1756
1757 func dumpGCProg(p *byte) {
1758 nptr := 0
1759 for {
1760 x := *p
1761 p = add1(p)
1762 if x == 0 {
1763 print("\t", nptr, " end\n")
1764 break
1765 }
1766 if x&0x80 == 0 {
1767 print("\t", nptr, " lit ", x, ":")
1768 n := int(x+7) / 8
1769 for i := 0; i < n; i++ {
1770 print(" ", hex(*p))
1771 p = add1(p)
1772 }
1773 print("\n")
1774 nptr += int(x)
1775 } else {
1776 nbit := int(x &^ 0x80)
1777 if nbit == 0 {
1778 for nb := uint(0); ; nb += 7 {
1779 x := *p
1780 p = add1(p)
1781 nbit |= int(x&0x7f) << nb
1782 if x&0x80 == 0 {
1783 break
1784 }
1785 }
1786 }
1787 count := 0
1788 for nb := uint(0); ; nb += 7 {
1789 x := *p
1790 p = add1(p)
1791 count |= int(x&0x7f) << nb
1792 if x&0x80 == 0 {
1793 break
1794 }
1795 }
1796 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1797 nptr += nbit * count
1798 }
1799 }
1800 }
// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
1808 func reflect_gcbits(x any) []byte {
1809 return pointerMask(x)
1810 }
// pointerMask returns the GC pointer mask for the value that ep points
// to, one byte (0 or 1) per pointer-sized word. If ep points to the
// stack, only statically live information is returned.
1815 func pointerMask(ep any) (mask []byte) {
1816 e := *efaceOf(&ep)
1817 p := e.data
1818 t := e._type
1819
1820 var et *_type
1821 if t.Kind() != abi.Pointer {
1822 throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
1823 }
1824 et = (*ptrtype)(unsafe.Pointer(t)).Elem
1825
1826
1827 for _, datap := range activeModules() {
// data
1829 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1830 bitmap := datap.gcdatamask.bytedata
1831 n := et.Size_
1832 mask = make([]byte, n/goarch.PtrSize)
1833 for i := uintptr(0); i < n; i += goarch.PtrSize {
1834 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1835 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1836 }
1837 return
1838 }
1839
// bss
1841 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1842 bitmap := datap.gcbssmask.bytedata
1843 n := et.Size_
1844 mask = make([]byte, n/goarch.PtrSize)
1845 for i := uintptr(0); i < n; i += goarch.PtrSize {
1846 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1847 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1848 }
1849 return
1850 }
1851 }
1852
// heap
1854 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1855 if s.spanclass.noscan() {
1856 return nil
1857 }
1858 limit := base + s.elemsize
1859
1860
1861
1862
1863 tp := s.typePointersOfUnchecked(base)
1864 base = tp.addr
1865
1866
1867 maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
1868 for {
1869 var addr uintptr
1870 if tp, addr = tp.next(limit); addr == 0 {
1871 break
1872 }
1873 maskFromHeap[(addr-base)/goarch.PtrSize] = 1
1874 }
1875
1876
1877
1878
1879 for i := limit; i < s.elemsize; i++ {
1880 if *(*byte)(unsafe.Pointer(i)) != 0 {
1881 throw("found non-zeroed tail of allocation")
1882 }
1883 }
1884
1885
1886
1887 for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
1888 maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
1889 }
1890
1891
1892 maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
1893 tp = s.typePointersOfType(et, base)
1894 for {
1895 var addr uintptr
1896 if tp, addr = tp.next(limit); addr == 0 {
1897 break
1898 }
1899 maskFromType[(addr-base)/goarch.PtrSize] = 1
1900 }
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912 differs := false
1913 for i := range maskFromHeap {
1914 if maskFromHeap[i] != maskFromType[i] {
1915 differs = true
1916 break
1917 }
1918 }
1919
1920 if differs {
1921 print("runtime: heap mask=")
1922 for _, b := range maskFromHeap {
1923 print(b)
1924 }
1925 println()
1926 print("runtime: type mask=")
1927 for _, b := range maskFromType {
1928 print(b)
1929 }
1930 println()
1931 print("runtime: type=", toRType(et).string(), "\n")
1932 throw("found two different masks from two different methods")
1933 }
1934
1935
1936 mask = maskFromHeap
1937
1938
1939
1940
1941 KeepAlive(ep)
1942 return
1943 }
1944
// stack
1946 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1947 found := false
1948 var u unwinder
1949 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1950 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1951 found = true
1952 break
1953 }
1954 }
1955 if found {
1956 locals, _, _ := u.frame.getStackMap(false)
1957 if locals.n == 0 {
1958 return
1959 }
1960 size := uintptr(locals.n) * goarch.PtrSize
1961 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1962 mask = make([]byte, n/goarch.PtrSize)
1963 for i := uintptr(0); i < n; i += goarch.PtrSize {
1964 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1965 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1966 }
1967 }
1968 return
1969 }
// Otherwise, p is not something the GC knows about (for example,
// read-only data); it is reported as having no pointers.
1974 return
1975 }
1976