Source file src/runtime/map_faststr.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

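// mapaccess1_faststr is the lookup fast path for maps with string keys
// (plain m[k] expressions). It returns a pointer to the element for key ky,
// or a pointer to the zero object if the key is not present; it never
// returns nil.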
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(abi.MapBucketCount)
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != abi.MapBucketCount {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.MapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

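// mapaccess2_faststr is like mapaccess1_faststr, but also reports whether the
// key was present; it backs the comma-ok form v, ok := m[k] for string-keyed maps.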
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(abi.MapBucketCount)
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != abi.MapBucketCount {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.MapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

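// mapassign_faststr returns a pointer to the element slot for key s, adding a
// new slot (and growing the table if necessary) when the key is not already
// present. The caller stores the value through the returned pointer. Assigning
// to a nil map panics, matching the behavior of mapassign.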
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

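// mapdelete_faststr removes the entry for key ky, if present: it clears the
// key and element, marks the cell emptyOne, and then converts a trailing run
// of emptyOne cells into emptyRest so later lookups can stop searching early.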
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

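// growWork_faststr performs incremental evacuation while the map is growing:
// it evacuates the old bucket corresponding to the bucket about to be used,
// plus one more old bucket to make progress on the overall grow.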
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

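// evacuate_faststr copies the entries of old bucket oldbucket (and its
// overflow chain) into the new bucket array. When the table is doubling,
// each entry goes to either the "x" destination (same bucket index) or the
// "y" destination (index + newbit), chosen by one bit of the key's hash.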
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}