Source file
src/runtime/map_fast64.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)
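
// mapaccess1_fast64 is the lookup fast path for maps whose keys are 8-byte
// values (for example map[uint64]V or map[int64]V). It returns a pointer to
// the element for key, or a pointer to the shared zero value if the key is
// absent; it never returns nil.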
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
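
// mapaccess2_fast64 is the comma-ok form of the lookup above: it returns a
// pointer to the element (or to the shared zero value) together with a boolean
// reporting whether the key was present.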
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
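
// mapassign_fast64 returns a pointer to the element slot for key, inserting a
// new slot (and growing the map) if the key is not already present. The caller
// then stores the value through the returned pointer.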
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
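
// mapassign_fast64ptr is the pointer-key variant of mapassign_fast64: the
// 8-byte key is a single pointer, so the key store is done as a pointer write
// rather than a plain uint64 store.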
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
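
// mapdelete_fast64 removes the entry for key, if present, clearing the key and
// element slots and maintaining the emptyOne/emptyRest tophash invariants so
// later lookups can stop early.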
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.Pointers() {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// On 32-bit systems the 8-byte key may contain pointers in
					// more than one layout; let memclrHasPointers handle them all.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne

			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--

			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
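
// growWork_fast64 performs a little evacuation work each time the map is
// written while growing, so the cost of growing is spread across many
// operations.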
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the old bucket corresponding to the bucket
	// we're about to use.
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// Evacuate one more old bucket to make progress on growing.
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
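
// evacuate_fast64 moves the contents of old bucket chain oldbucket into the
// new bucket array. During a size-doubling grow each old bucket splits into an
// "x" (same index) and a "y" (index + newbit) destination, chosen by one bit
// of the key's hash.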
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*8)
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*8)
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i to avoid a bounds check

				// Copy key.
				if t.Key.Pointers() && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// On 32-bit systems the 8-byte key may contain pointers in
						// more than one layout; fall back to typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
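
None of these functions are called directly by user code: for maps whose key type is 8 bytes wide, the compiler lowers ordinary map operations to the fast64 helpers above. A minimal sketch of that correspondence (plain Go, not part of this file; the comments naming the helpers are illustrative):

	package main

	import "fmt"

	func main() {
		m := make(map[uint64]string)
		m[42] = "answer" // assignment lowers to mapassign_fast64
		v := m[42]       // one-result lookup lowers to mapaccess1_fast64
		w, ok := m[42]   // comma-ok lookup lowers to mapaccess2_fast64
		delete(m, 42)    // delete lowers to mapdelete_fast64
		fmt.Println(v, w, ok)
	}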