package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged into an array of
// buckets. Each bucket contains up to abi.MapBucketCount (8) key/elem
// pairs. The low-order bits of the key's hash select a bucket, and each
// bucket stores a few high-order bits of each hash (the "tophash") to
// quickly distinguish entries within the bucket.
//
// If more than abi.MapBucketCount entries hash to a bucket, extra
// overflow buckets are chained off it. When the table exceeds its load
// factor (or accumulates too many overflow buckets), it grows by
// allocating a new bucket array and incrementally copying ("evacuating")
// buckets from the old array to the new one as the map is written to.
//
// Iterators walk the bucket array and must cope with buckets that have
// not yet been evacuated as well as entries that have already moved to
// the new array.

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	// Maximum number of key/elem pairs a bucket can hold, as a power of two.
	bucketCntBits = abi.MapBucketCountBits

	// Maximum average load of a bucket that triggers growth is
	// abi.MapBucketCount*13/16 (about 80% full).
	// Represented as loadFactorNum/loadFactorDen to allow integer math.
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment even
	// though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. A few values are reserved for special marks.
	// Each bucket (including its overflow buckets, if any) will have either
	// all or none of its entries in the evacuated* states (except during
	// evacuate(), which only happens during map writes, so no one else can
	// observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)
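
// Illustration: the two load-factor constants above encode an average of 6.5
// entries per bucket before the table grows. A rough worked example, assuming
// abi.MapBucketCount == 8:
//
//	loadFactorNum = 2 * 8 * 13 / 16 = 13
//	loadFactorDen = 2
//	loadFactorNum / loadFactorDen  = 6.5 entries per bucket on average
//
// So a table with 2^B buckets starts growing once it holds more than
// 13*(2^B)/2 entries (see overLoadFactor below).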

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	count     int // # live cells == size of map. Must be first (used by the len() builtin).
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B buckets. May be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark
	// the bucket type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow
	// and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [abi.MapBucketCount]uint8
	// Followed by abi.MapBucketCount keys and then abi.MapBucketCount elems.
	// NOTE: packing all the keys together and then all the elems together
	// makes the code a bit more complicated than alternating key/elem pairs,
	// but it eliminates the padding that would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}

// A hash iteration structure.
// If you modify hiter, also change the compiler's and reflect's copies of
// this structure to match its layout.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end.
	elem        unsafe.Pointer // Must be in second position.
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
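
// Illustration (an assumed example with 64-bit pointers): tophash keeps only
// the top byte of the hash and bumps small values past the reserved sentinels,
// so stored tophash values of live entries never collide with the states above:
//
//	hash := uintptr(0x03ff_0000_0000_1234)
//	top := uint8(hash >> 56) // 0x03, inside the reserved range
//	top += minTopHash        // stored as 0x08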

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth; see tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}
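
// A rough sketch of the sampling above (illustrative figures, not
// measurements): once h.B >= 16 the counter is bumped on only a random
// fraction of overflow-bucket allocations, so noverflow becomes an
// approximation of the true count:
//
//	B == 16: mask == 1,  increment on ~1/2 of calls
//	B == 18: mask == 7,  increment on ~1/8 of calls
//	B == 20: mask == 31, increment on ~1/32 of calls
//
// The threshold in tooManyOverflowBuckets is capped at 1<<15 to match.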

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if !t.Bucket.Pointers() {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most abi.MapBucketCount
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = uint32(rand())
	return h
}
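
// For illustration, a sketch of how ordinary Go code reaches the constructors
// above (the exact lowering is decided by the compiler; this mapping is an
// assumption for exposition, not a guarantee):
//
//	m1 := make(map[string]int)      // no/small hint: may lower to makemap_small
//	m2 := make(map[string]int, 100) // larger hint: lowers to makemap or makemap64
//
// makemap_small defers all bucket allocation to the first write (mapassign
// allocates h.buckets lazily), which is why it only seeds hash0.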

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or h.buckets may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, the bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = uint32(rand())

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < abi.MapBucketCount.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
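
// A worked example of the B selection loop above, assuming
// abi.MapBucketCount == 8 and therefore a load factor of 13/2 = 6.5:
//
//	make(map[string]int, 100)
//	B=0: 100 > 6.5*1  -> grow
//	B=1: 100 > 6.5*2  -> grow
//	B=2: 100 > 6.5*4  -> grow
//	B=3: 100 > 6.5*8  -> grow
//	B=4: 100 <= 6.5*16 = 104 -> stop
//
// so the map starts with 2^4 = 16 buckets, enough for about 104 entries
// before its first growth.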

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be allocated;
// otherwise dirtyalloc will be cleared and reused as the backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz, !t.Bucket.Pointers())
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.Pointers() {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}

// mapaccess1 returns a pointer to h[key]. It never returns nil; instead
// it returns a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err)
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2 is like mapaccess1, but additionally reports whether the key
// was present in the map. It backs the two-result form of a map read,
// v, ok := m[key].
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err)
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapaccessK returns both the key and the elem. It is used by the map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

// mapaccess1_fat is like mapaccess1, but for elem types with large zero
// values: the caller passes in a pointer to the zero value to return on a miss.
func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

// mapaccess2_fat is the two-result variant of mapaccess1_fat.
func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
// mapassign returns a pointer to the elem slot for key; the caller stores the
// value through that pointer. It may grow the map and must not be called
// concurrently with other reads or writes of the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}

// mapdelete removes the entry for key from the map, if present.
// It is the implementation of the delete built-in.
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err)
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.Pointers() {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states so lookups can stop early.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' must be zeroed, since it contains
// pointers; it is allocated on the stack by the compiler or on the heap
// by reflect_mapiterinit.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		throw("hash_iter size incorrect")
	}
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if !t.Bucket.Pointers() {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(rand())
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

// mapiternext advances the iterator to the next key/elem pair, storing
// pointers to them in it.key and it.elem. It sets it.key to nil when the
// iteration is complete.
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < abi.MapBucketCount; i++ {
		offi := (i + it.offset) & (abi.MapBucketCount - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// Skip empty and already-evacuated-empty cells. emptyRest is hard to
			// exploit here because we start iterating in the middle of a bucket.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow to a larger size).
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration order, skip it.
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
// It is called by the compiler for the clear built-in.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting

	// Mark buckets empty, so existing iterators can be terminated.
	markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
		for i := uintptr(0); i <= mask; i++ {
			b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
			for ; b != nil; b = b.overflow(t) {
				for i := uintptr(0); i < abi.MapBucketCount; i++ {
					b.tophash[i] = emptyRest
				}
			}
		}
	}
	markBucketsEmpty(h.buckets, bucketMask(h.B))
	if oldBuckets := h.oldbuckets; oldBuckets != nil {
		markBucketsEmpty(oldBuckets, h.oldbucketmask())
	}

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions.
	h.hash0 = uint32(rand())

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly allocated.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// The actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
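
// For illustration (assuming abi.MapBucketCount == 8, i.e. a load factor of
// 13/2 = 6.5), the growth thresholds work out to:
//
//	B=3:  8 buckets,    grow above 13*(8/2)    = 52 entries
//	B=5:  32 buckets,   grow above 13*(32/2)   = 208 entries
//	B=10: 1024 buckets, grow above 13*(1024/2) = 6656 entries
//
// Maps holding at most abi.MapBucketCount entries never grow on account of
// the load factor, since they fit in a single bucket.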

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use were dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "Too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
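
// A sketch of how the two growth triggers interact, under the same assumptions
// as the note after overLoadFactor: a map with B=10 (1024 buckets) grows when
// either
//
//	count+1 > 6656             (overLoadFactor), or
//	noverflow >= 1<<10 = 1024  (tooManyOverflowBuckets)
//
// The second trigger produces a same-size grow (see hashGrow), which rewrites
// the table in place to shed overflow buckets accumulated by churn of
// insertions and deletions.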

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// sameSizeGrowForIssue69110Test exposes sameSizeGrow for the runtime test
// covering go.dev/issue/69110.
func sameSizeGrowForIssue69110Test(h *hmap) bool {
	return h.sameSizeGrow()
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use.
	evacuate(t, h, bucket&h.oldbucketmask())

	// Evacuate one more oldbucket to make progress on growing.
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.Key, dst.k, k) // copy key
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

// reflect_makemap is for package reflect, which accesses it via linkname.
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflect's math.
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
		throw("key size wrong")
	}
	if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
		throw("elem size wrong")
	}
	if t.Key.Align_ > abi.MapBucketCount {
		throw("key align too big")
	}
	if t.Elem.Align_ > abi.MapBucketCount {
		throw("elem align too big")
	}
	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if abi.MapBucketCount < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.Key.Align_) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.Elem.Align_) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

// reflect_mapaccess is for package reflect, which accesses it via linkname.
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

// reflect_mapaccess_faststr is the string-key fast path of reflect_mapaccess.
func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

// reflect_mapassign is for package reflect, which accesses it via linkname.
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

// reflect_mapassign_faststr is the string-key fast path of reflect_mapassign.
func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

// reflect_mapdelete is for package reflect, which accesses it via linkname.
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

// reflect_mapdelete_faststr is the string-key fast path of reflect_mapdelete.
func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

// reflect_mapiterinit is for package reflect, which accesses it via linkname.
// It initializes an iterator over the map for reflect's MapIter.
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

// reflect_mapiternext is for package reflect, which accesses it via linkname.
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

// reflect_mapiterkey returns the key of the iterator's current entry.
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

// reflect_mapiterelem returns the elem of the iterator's current entry.
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

// reflect_maplen is for package reflect, which accesses it via linkname.
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

// reflect_mapclear is for package reflect, which accesses it via linkname.
func reflect_mapclear(t *maptype, h *hmap) {
	mapclear(t, h)
}

// reflectlite_maplen is for package internal/reflectlite, which accesses it via linkname.
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

// mapinitnoop is a no-op function known to the Go linker; if a given global
// map (of the right size) is determined to be dead, the linker rewrites the
// relevant initialization routine to call this function, eliminating the map
// initialization code and data.
func mapinitnoop()

// mapclone is the backing implementation for maps.Clone.
func mapclone(m any) any {
	e := efaceOf(&m)
	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
	return m
}
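
// For illustration, the user-level path that reaches mapclone is maps.Clone
// (an assumed sketch of the call chain, shown only to connect the exported
// API to this runtime helper):
//
//	import "maps"
//
//	src := map[string]int{"a": 1, "b": 2}
//	dst := maps.Clone(src) // independent copy; writes to dst don't affect src
//
// mapclone2 below copies bucket by bucket, so a clone costs O(len(m)) and
// starts out with the same hash seed as the source map.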

// moveToBmap moves the entries of bucket src into dst, starting at index pos
// in dst. It returns the (possibly new overflow) destination bucket and the
// position at which the next key/elem should be written.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
	for i := 0; i < abi.MapBucketCount; i++ {
		if isEmpty(src.tophash[i]) {
			continue
		}

		for ; pos < abi.MapBucketCount; pos++ {
			if isEmpty(dst.tophash[pos]) {
				break
			}
		}

		if pos == abi.MapBucketCount {
			dst = h.newoverflow(t, dst)
			pos = 0
		}

		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
		srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

		dst.tophash[pos] = src.tophash[i]
		if t.IndirectKey() {
			srcK = *(*unsafe.Pointer)(srcK)
			if t.NeedKeyUpdate() {
				kStore := newobject(t.Key)
				typedmemmove(t.Key, kStore, srcK)
				srcK = kStore
			}
			// Note: if NeedKeyUpdate is false, then the memory
			// used to store the key is immutable, so we can share
			// it between the original map and its clone.
			*(*unsafe.Pointer)(dstK) = srcK
		} else {
			typedmemmove(t.Key, dstK, srcK)
		}
		if t.IndirectElem() {
			srcEle = *(*unsafe.Pointer)(srcEle)
			eStore := newobject(t.Elem)
			typedmemmove(t.Elem, eStore, srcEle)
			*(*unsafe.Pointer)(dstEle) = eStore
		} else {
			typedmemmove(t.Elem, dstEle, srcEle)
		}
		pos++
		h.count++
	}
	return dst, pos
}

func mapclone2(t *maptype, src *hmap) *hmap {
	hint := src.count
	if overLoadFactor(hint, src.B) {
		// Note: src.count may exceed the normal load-factor capacity for
		// src.B (for example while a same-size grow is still in progress).
		// Cap the hint so the destination never gets more buckets than the
		// source, which keeps the bucket-by-bucket copy below simple. If the
		// clone ends up overloaded, the next insert will simply trigger a grow.
		hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
	}
	dst := makemap(t, hint, nil)
	dst.hash0 = src.hash0
	dst.nevacuate = 0
	// flags do not need to be copied; a fresh clone starts with no flags set.

	if src.count == 0 {
		return dst
	}

	if src.flags&hashWriting != 0 {
		fatal("concurrent map clone and map write")
	}

	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
		// Quick copy for small maps.
		dst.buckets = newobject(t.Bucket)
		dst.count = src.count
		typedmemmove(t.Bucket, dst.buckets, src.buckets)
		return dst
	}

	if dst.B == 0 {
		dst.buckets = newobject(t.Bucket)
	}
	dstArraySize := int(bucketShift(dst.B))
	srcArraySize := int(bucketShift(src.B))
	for i := 0; i < dstArraySize; i++ {
		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
		pos := 0
		for j := 0; j < srcArraySize; j += dstArraySize {
			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
		}
	}

	if src.oldbuckets == nil {
		return dst
	}

	oldB := src.B
	srcOldbuckets := src.oldbuckets
	if !src.sameSizeGrow() {
		oldB--
	}
	oldSrcArraySize := int(bucketShift(oldB))

	for i := 0; i < oldSrcArraySize; i++ {
		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
		if evacuated(srcBmap) {
			continue
		}

		if oldB >= dst.B { // an old source bucket maps to a single destination bucket
			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
			for dstBmap.overflow(t) != nil {
				dstBmap = dstBmap.overflow(t)
			}
			pos := 0
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
			continue
		}

		// oldB < dst.B, so an old source bucket's entries spread over several
		// destination buckets. Process entries one at a time via mapassign.
		for srcBmap != nil {
			// move from old bucket to new bucket
			for i := uintptr(0); i < abi.MapBucketCount; i++ {
				if isEmpty(srcBmap.tophash[i]) {
					continue
				}

				if src.flags&hashWriting != 0 {
					fatal("concurrent map clone and map write")
				}

				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
				if t.IndirectKey() {
					srcK = *((*unsafe.Pointer)(srcK))
				}

				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					srcEle = *((*unsafe.Pointer)(srcEle))
				}
				dstEle := mapassign(t, dst, srcK)
				typedmemmove(t.Elem, dstEle, srcEle)
			}
			srcBmap = srcBmap.overflow(t)
		}
	}
	return dst
}

// keys appends the keys of map m to the slice pointed to by p, starting at a
// random bucket and offset. It is used, via linkname, by package maps.
func keys(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)

	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
	if h.B == 0 {
		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyKeys(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyKeys(t, h, b, s, offset)
		}
	}
	return
}

func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
			s.len++
		}
		b = b.overflow(t)
	}
}

// values appends the values of map m to the slice pointed to by p, starting
// at a random bucket and offset. It is used, via linkname, by package maps.
func values(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)
	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
	if h.B == 0 {
		copyValues(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyValues(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyValues(t, h, b, s, offset)
		}
	}
	return
}

func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}

			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}

			ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
			if t.IndirectElem() {
				ele = *((*unsafe.Pointer)(ele))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
			s.len++
		}
		b = b.overflow(t)
	}
}