Source file
src/runtime/map_fast32.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if goarch.PtrSize == 4 && t.Key.Pointers() {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers
			// to repeatedly trigger hash collisions.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*4)
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*4)
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

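For orientation only (not part of map_fast32.go): the helpers above are the runtime fast paths behind ordinary operations on maps whose key is a 4-byte type such as uint32. The sketch below shows the user-level statements that the gc compiler typically lowers onto these entry points; the exact lowering is an implementation detail and can change between Go releases and map implementations.

// Illustrative sketch, separate program; the comments note the fast-path
// helpers that a typical gc build would call for each operation.
package main

import "fmt"

func main() {
	m := make(map[uint32]string)

	m[42] = "answer"  // assignment       -> mapassign_fast32
	s := m[42]        // one-result read  -> mapaccess1_fast32
	v, ok := m[7]     // comma-ok read    -> mapaccess2_fast32
	delete(m, 42)     // deletion         -> mapdelete_fast32

	fmt.Println(s, v, ok, len(m))
}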