Source file: src/runtime/map_faststr_noswiss.go
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file.

//go:build !goexperiment.swissmap

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/sys"
	"unsafe"
)

func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
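		// One-bucket table.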
		b := (*bmap)(h.buckets)
		if key.len < 32 {
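			// short key, doing lots of comparisons is ok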
			for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
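		// long key, try not to do more comparisons than necessary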
		keymaybe := uintptr(abi.OldMapBucketCount)
		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
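			// check first 4 bytes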
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
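			// check last 4 bytes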
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != abi.OldMapBucketCount {
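				// Two keys are potential matches. Use hash to distinguish them.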
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.OldMapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
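			// There used to be half as many buckets; mask down one more power of two.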
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2_faststr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
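		// One-bucket table.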
		b := (*bmap)(h.buckets)
		if key.len < 32 {
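			// short key, doing lots of comparisons is ok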
			for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
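		// long key, try not to do more comparisons than necessary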
		keymaybe := uintptr(abi.OldMapBucketCount)
		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
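			// check first 4 bytes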
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
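			// check last 4 bytes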
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != abi.OldMapBucketCount {
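				// Two keys are potential matches. Use hash to distinguish them.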
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.OldMapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
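			// There used to be half as many buckets; mask down one more power of two.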
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapassign_faststr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_faststr
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
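	// Set hashWriting after calling t.hasher for consistency with mapassign.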
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
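			// already have a mapping for key. Update it.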
			inserti = i
			insertb = b
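			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.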
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}
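
	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.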
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
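		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.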
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
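	// store new key at insert position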
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
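	// Set hashWriting after calling t.hasher for consistency with mapdelete.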
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
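			// Clear key's pointer.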
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
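			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.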
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
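					// Find previous bucket, continue at its last entry.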
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
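
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.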
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
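	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use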
	evacuate_faststr(t, h, bucket&h.oldbucketmask())
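
	// evacuate one more oldbucket to make progress on growing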
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
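		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.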
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
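			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.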
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize)
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
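					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).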
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
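
				// Copy key.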
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
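				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.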
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
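		// Unlink the overflow buckets & clear key/elem to help GC.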
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
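			// Preserve b.tophash because the evacuation
			// state is maintained there.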
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}