Source file
src/runtime/map_fast32_noswiss.go
//go:build !goexperiment.swissmap

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/sys"
	"unsafe"
)
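// mapaccess1_fast32 is the fast-path lookup for maps keyed by uint32. It
// returns a pointer to the element for key, or a pointer to the zero value
// if key is not present; it never returns nil.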
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
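// mapaccess2_fast32 is like mapaccess1_fast32, but also reports whether the
// key was present in the map.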
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
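// mapassign_fast32 is the fast-path insert/update for maps keyed by uint32.
// It returns a pointer to the element slot for key, growing the map if
// necessary; the caller stores the element value through that pointer.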
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
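// mapassign_fast32ptr is like mapassign_fast32, but for maps whose 32-bit-wide
// key type contains a pointer; it stores the key with a pointer store rather
// than a plain uint32 store so that the garbage collector sees the write.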
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
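// mapdelete_fast32 removes key and its element from the map, if present,
// and restores the emptyRest invariant at the end of the bucket chain.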
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear the key if there are pointers in it. This can only
			// happen when pointers are 32 bits wide, as 64-bit pointers do
			// not fit into a 32-bit key.
			if goarch.PtrSize == 4 && t.Key.Pointers() {
				// The key must be a pointer type; clear it so the GC no longer sees it.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a run of emptyOne states,
			// change those to emptyRest states.
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers
			// to repeatedly trigger hash collisions.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
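// growWork_fast32 performs a bounded amount of incremental evacuation work
// for a map that is in the middle of growing.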
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the old bucket corresponding
	// to the bucket we're about to use.
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// Evacuate one more old bucket to make progress on growing.
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}
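// evacuate_fast32 copies the entries of old bucket oldbucket into the new
// bucket array, splitting them between the low (x) and high (y) destinations
// when the map is doubling in size.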
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*4)
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*4)
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Clear key/elem of the old bucket (and unlink its overflow chain) to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}