src/runtime/map_noswiss.go

package runtime
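
// This file implements Go's pre-SwissTable ("old") map: a chaining hash
// table. Data is stored in buckets (bmap), each holding up to
// abi.OldMapBucketCount key/elem pairs. The low-order bits of a key's hash
// select a bucket; the top byte of the hash (tophash) is cached per entry to
// speed up lookups. A full bucket chains additional entries into overflow
// buckets. When the load factor is exceeded (or overflow buckets pile up),
// the table grows into a new bucket array and entries are evacuated from
// h.oldbuckets incrementally by subsequent writes (see hashGrow, growWork,
// and evacuate).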

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

type maptype = abi.OldMapType

const (
	// log2 of the number of entries in a bucket.
	bucketCntBits = abi.OldMapBucketCountBits

	// Maximum average load of a bucket that triggers growth is
	// loadFactorNum/loadFactorDen (13/16 of abi.OldMapBucketCount).
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16

	// dataOffset is where the key data begins in a bucket: the size of
	// bmap rounded up to the alignment of int64.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. Values below minTopHash are per-cell or
	// per-bucket markers; a real entry's tophash is always >= minTopHash
	// (see tophash below).
	emptyRest      = 0 // this cell is empty, and so are all cells at higher indexes and in later overflow buckets
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to the first half of the larger table.
	evacuatedY     = 3 // same as above, but evacuated to the second half of the larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)
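
// For example, with B == 5 there are 1<<5 == 32 buckets. A lookup hashes the
// key, selects bucket hash&bucketMask(5) == hash&31, and compares
// tophash(hash) against the eight tophash bytes stored in that bucket before
// doing full key comparisons (see mapaccess1 below).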

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	count     int    // # of live key/elem pairs (the map's len())
	flags     uint8  // iterator, oldIterator, hashWriting and sameSizeGrow bits
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B buckets; may be nil if count == 0
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only while growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
	clearSeq   uint64         // incremented by mapclear; lets iterators notice a clear

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present in all maps.
type mapextra struct {
	// If key and elem contain no pointers, the bucket type is marked as
	// containing no pointers, which avoids scanning such maps. However,
	// bmap.overflow is a pointer. To keep overflow buckets alive in that
	// case, pointers to all overflow buckets are stored here.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [abi.OldMapBucketCount]uint8
	// Followed by bucketCnt keys, then bucketCnt elems, then an overflow
	// pointer. These trailing fields depend on the concrete map type, so
	// they are not declared here; the code reaches them through unsafe
	// pointer arithmetic (see keys, overflow, setoverflow, dataOffset).
}

// A hash iteration structure, initialized by mapiterinit and advanced by
// mapiternext. key is set to nil when iteration is complete.
type hiter struct {
	key         unsafe.Pointer // current key; nil means iteration is over
	elem        unsafe.Pointer // current elem
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket array at iterator initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
	clearSeq    uint64
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash: the top byte of the hash,
// bumped up past the reserved values below minTopHash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

// evacuated reports whether an old bucket has already been moved to the new
// bucket array; the evacuation state is kept in tophash[0].
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

// overflow returns the overflow-bucket pointer stored at the end of b.
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

// setoverflow sets b's overflow-bucket pointer.
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

// keys returns a pointer to the first key slot in b.
func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow, the count of overflow buckets used to
// trigger same-size map growth (see tooManyOverflowBuckets). To keep hmap
// small, noverflow is a uint16: for small maps it is an exact count, for
// large maps an approximate one.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)),
	// so that when there are 1<<15 overflow buckets,
	// we have approximately as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and rand() & 7 == 0 with probability 1/8.
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}

// newoverflow links a new overflow bucket to b, taking one from the
// preallocated pool when available and allocating otherwise.
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if !t.Bucket.Pointers() {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most abi.OldMapBucketCount
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = uint32(rand())
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, the bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = uint32(rand())

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// Allocate the initial hash table.
	// If B == 0, the buckets field is allocated lazily later (in mapassign).
	// If hint is large, zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
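
// A call such as
//
//	m := make(map[string]int, 100)
//
// reaches the runtime as makemap with hint == 100 (the exact entry point the
// compiler picks depends on the hint and on escape analysis); the loop above
// then chooses the smallest B that keeps 100 entries under the load factor
// and allocates the bucket array up front.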

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be allocated and
// otherwise dirtyalloc will be cleared and reused as backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz, !t.Bucket.Pointers())
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.Pointers() {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}

// mapaccess1 returns a pointer to h[key]. It never returns nil; if the key
// is not in the map it returns a reference to the zero object for the elem
// type instead.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err) // an unhashable key panics even on a nil or empty map
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2 is like mapaccess1, but also returns a boolean reporting
// whether the key is present in the map.
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err)
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapaccessK returns both the key and the elem. It is used by the map
// iterator to re-find an entry that may have moved during an in-progress grow.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

// mapaccess1_fat is like mapaccess1 but takes an explicit zero value to
// return when the key is missing.
func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

// mapaccess2_fat is the two-result variant of mapaccess1_fat.
func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
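
// A map assignment such as
//
//	m[k] = v
//
// lowers to a call of mapassign (or one of its type-specialized fast
// variants): the runtime returns the address of the elem slot for k,
// inserting the key first if necessary, and the caller stores v through
// that pointer.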

// mapdelete removes the entry for key from h, if present.
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err)
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.Pointers() {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed when the map becomes empty, so that hash
			// collisions are harder to trigger repeatedly against the same map.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' must be zeroed, since it contains
// pointers; it is allocated by the compiler for range loops or passed in
// by reflect_mapiterinit.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize {
		throw("hash_iter size incorrect")
	}
	it.h = h
	it.clearSeq = h.clearSeq

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if !t.Bucket.Pointers() {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(rand())
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
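
// A for-range loop over a map drives this iterator. Because mapiterinit
// derives startBucket and offset from rand(), two iterations over the same
// map generally visit keys in different orders.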

// mapiternext advances the iterator to the next map entry, writing the
// key/elem pointers into it.key and it.elem, or setting it.key to nil when
// iteration is finished.
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < abi.OldMapBucketCount; i++ {
		offi := (i + it.offset) & (abi.OldMapBucketCount - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// emptyRest cannot be used as a shortcut here, because iteration
			// may start in the middle of a bucket.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet, so we're iterating through
			// the oldbucket, skipping any keys that will go to the other new
			// bucket (each oldbucket expands to two buckets during a grow).
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We use the low bit
				// of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if it.clearSeq == h.clearSeq &&
			((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
				!(t.ReflexiveKey() || t.Key.Equal(k, k))) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can
			// just return it. That's lucky for us because when key!=key we
			// can't look it up successfully.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid an unnecessary write barrier
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map. It is called by the compiler.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting
	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0
	h.clearSeq++

	// Reset the hash seed so that hash collisions are harder to trigger
	// repeatedly against the same map.
	h.hash0 = uint32(rand())

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
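
// With abi.OldMapBucketCount == 8, loadFactorNum*(bucketShift(B)/loadFactorDen)
// works out to 6.5 * 2^B, so growth triggers once the map would exceed an
// average of 6.5 entries per bucket. For example, with B == 3 (eight buckets)
// the limit is 52 entries; the insertion that would make it 53 grows the
// table to B == 4 (see the check in mapassign and hashGrow).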

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// sameSizeGrowForIssue69110Test exposes sameSizeGrow for the regression test
// for issue 69110.
func sameSizeGrowForIssue69110Test(h *hmap) bool {
	return h.sameSizeGrow()
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

// evacuate moves the entries of old bucket oldbucket into the new bucket
// array, marking each old slot with an evacuation state as it goes.
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize))
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover, it
						// isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must match
						// whatever decision the iterator made. Fortunately, we have the
						// freedom to send these keys either way. Also, tophash is
						// meaningless for these kinds of keys. We let the low bit of
						// tophash drive the evacuation decision and recompute a new
						// random tophash for the next level, so these keys get evenly
						// distributed across all buckets after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.Key, dst.k, k) // copy key
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

// advanceEvacuationMark bumps h.nevacuate past buckets that are already
// evacuated and finishes the grow when every old bucket has been processed.
func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

// reflect_makemap is the map-creation entry point used by package reflect.
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
		throw("key size wrong")
	}
	if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
		throw("elem size wrong")
	}
	if t.Key.Align_ > abi.OldMapBucketCount {
		throw("key align too big")
	}
	if t.Elem.Align_ > abi.OldMapBucketCount {
		throw("elem align too big")
	}
	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if abi.OldMapBucketCount < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.Key.Align_) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.Elem.Align_) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

// reflect_mapaccess is the lookup entry point used by package reflect.
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

// reflect_mapassign is the assignment entry point used by package reflect.
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

// reflect_mapiterinit, reflect_mapiternext, reflect_mapiterkey and
// reflect_mapiterelem drive map iteration on behalf of package reflect.
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

// reflect_maplen reports the number of entries in h for package reflect.
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

func reflect_mapclear(t *maptype, h *hmap) {
	mapclear(t, h)
}

func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

// mapinitnoop is declared without a body; its no-op implementation is
// provided elsewhere in the runtime.
func mapinitnoop()

// mapclone makes a shallow copy of a map; it backs maps.Clone.
func mapclone(m any) any {
	e := efaceOf(&m)
	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
	return m
}

// moveToBmap moves the entries of bucket src into dst. It returns the
// destination bucket (or a new destination bucket if the original overflows)
// and the position at which the next key/elem will be written; a returned
// pos == abi.OldMapBucketCount means the next write needs an overflow bucket.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
	for i := 0; i < abi.OldMapBucketCount; i++ {
		if isEmpty(src.tophash[i]) {
			continue
		}

		for ; pos < abi.OldMapBucketCount; pos++ {
			if isEmpty(dst.tophash[pos]) {
				break
			}
		}

		if pos == abi.OldMapBucketCount {
			dst = h.newoverflow(t, dst)
			pos = 0
		}

		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
		srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

		dst.tophash[pos] = src.tophash[i]
		if t.IndirectKey() {
			srcK = *(*unsafe.Pointer)(srcK)
			if t.NeedKeyUpdate() {
				kStore := newobject(t.Key)
				typedmemmove(t.Key, kStore, srcK)
				srcK = kStore
			}
			// Note: if NeedKeyUpdate is false, the key is never overwritten
			// after insertion, so the clone can share the key's backing
			// storage with the source map.
			*(*unsafe.Pointer)(dstK) = srcK
		} else {
			typedmemmove(t.Key, dstK, srcK)
		}
		if t.IndirectElem() {
			srcEle = *(*unsafe.Pointer)(srcEle)
			eStore := newobject(t.Elem)
			typedmemmove(t.Elem, eStore, srcEle)
			*(*unsafe.Pointer)(dstEle) = eStore
		} else {
			typedmemmove(t.Elem, dstEle, srcEle)
		}
		pos++
		h.count++
	}
	return dst, pos
}

func mapclone2(t *maptype, src *hmap) *hmap {
	hint := src.count
	if overLoadFactor(hint, src.B) {
		// The source map is overloaded for its current size. Clamp the hint to
		// the largest count that still satisfies the load factor for src.B
		// buckets, so the clone does not allocate a larger bucket array than
		// the source.
		hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
	}
	dst := makemap(t, hint, nil)
	dst.hash0 = src.hash0
	dst.nevacuate = 0
	// flags do not need to be copied here, just like a new map has no flags.

	if src.count == 0 {
		return dst
	}

	if src.flags&hashWriting != 0 {
		fatal("concurrent map clone and map write")
	}

	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
		// Quick copy for small maps.
		dst.buckets = newobject(t.Bucket)
		dst.count = src.count
		typedmemmove(t.Bucket, dst.buckets, src.buckets)
		return dst
	}

	if dst.B == 0 {
		dst.buckets = newobject(t.Bucket)
	}
	dstArraySize := int(bucketShift(dst.B))
	srcArraySize := int(bucketShift(src.B))
	for i := 0; i < dstArraySize; i++ {
		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
		pos := 0
		for j := 0; j < srcArraySize; j += dstArraySize {
			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
		}
	}

	if src.oldbuckets == nil {
		return dst
	}

	oldB := src.B
	srcOldbuckets := src.oldbuckets
	if !src.sameSizeGrow() {
		oldB--
	}
	oldSrcArraySize := int(bucketShift(oldB))

	for i := 0; i < oldSrcArraySize; i++ {
		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
		if evacuated(srcBmap) {
			continue
		}

		if oldB >= dst.B { // the destination has no more bucket bits than the old source array
			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
			for dstBmap.overflow(t) != nil {
				dstBmap = dstBmap.overflow(t)
			}
			pos := 0
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
			continue
		}

		// oldB < dst.B, so a single source bucket may fan out to multiple
		// destination buckets. Process entries one at a time through mapassign.
		for srcBmap != nil {
			for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
				if isEmpty(srcBmap.tophash[i]) {
					continue
				}

				if src.flags&hashWriting != 0 {
					fatal("concurrent map clone and map write")
				}

				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
				if t.IndirectKey() {
					srcK = *((*unsafe.Pointer)(srcK))
				}

				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					srcEle = *((*unsafe.Pointer)(srcEle))
				}
				dstEle := mapassign(t, dst, srcK)
				typedmemmove(t.Elem, dstEle, srcEle)
			}
			srcBmap = srcBmap.overflow(t)
		}
	}
	return dst
}

// keys copies every key of m into the slice pointed to by p, which must
// already have sufficient capacity. The starting bucket and in-bucket offset
// are randomized.
func keys(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)

	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
	if h.B == 0 {
		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyKeys(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyKeys(t, h, b, s, offset)
		}
	}
	return
}

func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
			s.len++
		}
		b = b.overflow(t)
	}
}

// values copies every value of m into the slice pointed to by p, which must
// already have sufficient capacity. Like keys, the traversal order is
// randomized.
func values(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)
	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
	if h.B == 0 {
		copyValues(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyValues(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyValues(t, h, b, s, offset)
		}
	}
	return
}

func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}

			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}

			ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
			if t.IndirectElem() {
				ele = *((*unsafe.Pointer)(ele))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
			s.len++
		}
		b = b.overflow(t)
	}
}