src/runtime/map_fast64_noswiss.go
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.swissmap

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/sys"
	"unsafe"
)
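
// In rough terms, the compiler lowers operations on maps with 64-bit keys
// (for example map[uint64]V, or pointer-keyed maps on 64-bit platforms via
// mapassign_fast64ptr) to the fast-path routines in this file. A minimal
// sketch of the correspondence (illustrative, not literal compiler output):
//
//	m := make(map[uint64]int)
//	m[42] = 1      // mapassign_fast64(t, h, 42)
//	v := m[42]     // mapaccess1_fast64(t, h, 42)
//	v, ok := m[42] // mapaccess2_fast64(t, h, 42)
//	delete(m, 42)  // mapdelete_fast64(t, h, 42)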

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
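
// A bucket (bmap) in this old map layout packs its fields back to back: the
// tophash array, then abi.OldMapBucketCount (8) keys, then 8 elems, then the
// overflow pointer. The elem address computed in the lookup above is plain
// offset arithmetic over that layout; a sketch, assuming 8-byte keys:
//
//	// elem address of slot i in bucket b:
//	//   b + dataOffset              skip the bucket header (tophash array)
//	//     + abi.OldMapBucketCount*8 skip the 8 uint64 keys
//	//     + i*t.ValueSize           index into the elem array
//	e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))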

// mapaccess2_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapassign_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency
	// with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
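
// The hashWriting dance above is a best-effort race detector, not a lock.
// The flag is flipped with XOR on entry and re-checked before being cleared
// at done:, so when two goroutines write the same map concurrently, at
// least one is likely to observe the flag in an unexpected state and abort.
// A sketch of the pattern:
//
//	h.flags ^= hashWriting // claim; a second racing XOR flips it back off
//	// ... mutate the map ...
//	if h.flags&hashWriting == 0 {
//		fatal("concurrent map writes") // someone else toggled the flag
//	}
//	h.flags &^= hashWriting // release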

// mapassign_fast64ptr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency
	// with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
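
// mapassign_fast64ptr repeats mapassign_fast64 rather than sharing code
// because pointer keys must be stored through *(*unsafe.Pointer), so the
// compiler can emit a GC write barrier for the store; writing the same bits
// through *(*uint64) would hide the pointer from the barrier. The offset
// math is otherwise identical, since a pointer key occupies the same 8
// bytes on 64-bit platforms.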

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency
	// with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.Pointers() {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32-bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
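
// Deletion marks a slot emptyOne ("this slot is free") and then, when the
// slot ends a run at the tail of the bucket chain, rewrites the trailing
// emptyOne marks to emptyRest ("this slot and everything after it is
// free"), which lets lookups and inserts stop probing early. A sketch of
// one bucket's tophash after its last two entries are deleted (assuming no
// overflow bucket):
//
//	before: [h0 h1 h2 h3 h4 h5 emptyOne emptyOne]
//	after:  [h0 h1 h2 h3 h4 h5 emptyRest emptyRest]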

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
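
// Growth is incremental: instead of rehashing the whole table at once, each
// write into a growing map evacuates the old bucket it is about to use plus
// at most one more (h.nevacuate), so the cost of a grow is amortized across
// subsequent assignments and deletions.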

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*8)
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*8)
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.Key.Pointers() && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze one or more 32-bit pointers into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
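
// When the table doubles, each old bucket splits in two: an entry either
// keeps its index (the x destination) or moves to index+newbit (the y
// destination), depending on the next hash bit. A worked example, assuming
// a grow from B=2 (4 buckets) to B=3 (8 buckets), so newbit = 4:
//
//	hash(k1) = ...1001: old bucket 1 (hash&3); hash&4 == 0 -> stays in bucket 1 (x)
//	hash(k2) = ...1101: old bucket 1 (hash&3); hash&4 != 0 -> moves to bucket 5 (y)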