Source file src/runtime/map_faststr_noswiss.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build !goexperiment.swissmap
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goarch"
    12  	"internal/runtime/sys"
    13  	"unsafe"
    14  )
    15  
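Everything in this file backs ordinary map[string]T operations when the swissmap experiment is off: for string-keyed maps the compiler lowers reads, writes, and deletes to the *_faststr helpers defined below. As a rough usage sketch (plain user code, nothing here calls the runtime directly; the comments describe the typical lowering under this build):

package main

import "fmt"

func main() {
	m := make(map[string]int)

	m["alpha"] = 1     // assignment: mapassign_faststr returns the slot, then the value is stored
	v := m["alpha"]    // one-result read: mapaccess1_faststr
	w, ok := m["beta"] // comma-ok read: mapaccess2_faststr
	delete(m, "alpha") // delete: mapdelete_faststr

	fmt.Println(v, w, ok, len(m)) // 1 0 false 0
}
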
    16  func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
    17  	if raceenabled && h != nil {
    18  		callerpc := sys.GetCallerPC()
    19  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
    20  	}
    21  	if h == nil || h.count == 0 {
    22  		return unsafe.Pointer(&zeroVal[0])
    23  	}
    24  	if h.flags&hashWriting != 0 {
    25  		fatal("concurrent map read and map write")
    26  	}
    27  	key := stringStructOf(&ky)
    28  	if h.B == 0 {
    29  		// One-bucket table.
    30  		b := (*bmap)(h.buckets)
    31  		if key.len < 32 {
    32  			// short key, doing lots of comparisons is ok
    33  			for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
    34  				k := (*stringStruct)(kptr)
    35  				if k.len != key.len || isEmpty(b.tophash[i]) {
    36  					if b.tophash[i] == emptyRest {
    37  						break
    38  					}
    39  					continue
    40  				}
    41  				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
    42  					return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
    43  				}
    44  			}
    45  			return unsafe.Pointer(&zeroVal[0])
    46  		}
    47  		// long key, try not to do more comparisons than necessary
    48  		keymaybe := uintptr(abi.OldMapBucketCount)
    49  		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
    50  			k := (*stringStruct)(kptr)
    51  			if k.len != key.len || isEmpty(b.tophash[i]) {
    52  				if b.tophash[i] == emptyRest {
    53  					break
    54  				}
    55  				continue
    56  			}
    57  			if k.str == key.str {
    58  				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
    59  			}
    60  			// check first 4 bytes
    61  			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
    62  				continue
    63  			}
    64  			// check last 4 bytes
    65  			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
    66  				continue
    67  			}
    68  			if keymaybe != abi.OldMapBucketCount {
    69  				// Two keys are potential matches. Use hash to distinguish them.
    70  				goto dohash
    71  			}
    72  			keymaybe = i
    73  		}
    74  		if keymaybe != abi.OldMapBucketCount {
    75  			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
    76  			if memequal(k.str, key.str, uintptr(key.len)) {
    77  				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
    78  			}
    79  		}
    80  		return unsafe.Pointer(&zeroVal[0])
    81  	}
    82  dohash:
    83  	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
    84  	m := bucketMask(h.B)
    85  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
    86  	if c := h.oldbuckets; c != nil {
    87  		if !h.sameSizeGrow() {
    88  			// There used to be half as many buckets; mask down one more power of two.
    89  			m >>= 1
    90  		}
    91  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
    92  		if !evacuated(oldb) {
    93  			b = oldb
    94  		}
    95  	}
    96  	top := tophash(hash)
    97  	for ; b != nil; b = b.overflow(t) {
    98  		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
    99  			k := (*stringStruct)(kptr)
   100  			if k.len != key.len || b.tophash[i] != top {
   101  				continue
   102  			}
   103  			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
   104  				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
   105  			}
   106  		}
   107  	}
   108  	return unsafe.Pointer(&zeroVal[0])
   109  }
   110  
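The one-bucket path above (lines 28-81 of the listing) splits on key length: short keys are simply compared slot by slot, while long keys are screened by length, pointer identity, and their first and last four bytes. At most one surviving candidate is compared in full, and if two candidates survive the screening the code gives up and falls through to the hashed path at dohash. The helper below is a hypothetical, unsafe-free restatement of that screening order, not the runtime's code; it also performs the full comparison immediately rather than deferring it the way keymaybe does:

package main

import "fmt"

// likelyEqual mirrors the order of the cheap checks used for "long" keys
// in the listing: length first, then the first and last 4 bytes, and only
// then a full comparison. Purely illustrative.
func likelyEqual(a, b string) bool {
	if len(a) != len(b) {
		return false // different lengths can never match
	}
	if len(a) >= 4 {
		if a[:4] != b[:4] { // first 4 bytes
			return false
		}
		if a[len(a)-4:] != b[len(b)-4:] { // last 4 bytes
			return false
		}
	}
	return a == b // full comparison only for survivors of the cheap checks
}

func main() {
	fmt.Println(likelyEqual("mapaccess1_faststr", "mapaccess2_faststr")) // false: cheap checks pass, full compare fails
	fmt.Println(likelyEqual("mapaccess1_faststr", "mapaccess1_faststr")) // true
}
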
   111  // mapaccess2_faststr should be an internal detail,
   112  // but widely used packages access it using linkname.
   113  // Notable members of the hall of shame include:
   114  //   - github.com/ugorji/go/codec
   115  //
   116  // Do not remove or change the type signature.
   117  // See go.dev/issue/67401.
   118  //
   119  //go:linkname mapaccess2_faststr
   120  func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
   121  	if raceenabled && h != nil {
   122  		callerpc := sys.GetCallerPC()
   123  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
   124  	}
   125  	if h == nil || h.count == 0 {
   126  		return unsafe.Pointer(&zeroVal[0]), false
   127  	}
   128  	if h.flags&hashWriting != 0 {
   129  		fatal("concurrent map read and map write")
   130  	}
   131  	key := stringStructOf(&ky)
   132  	if h.B == 0 {
   133  		// One-bucket table.
   134  		b := (*bmap)(h.buckets)
   135  		if key.len < 32 {
   136  			// short key, doing lots of comparisons is ok
   137  			for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
   138  				k := (*stringStruct)(kptr)
   139  				if k.len != key.len || isEmpty(b.tophash[i]) {
   140  					if b.tophash[i] == emptyRest {
   141  						break
   142  					}
   143  					continue
   144  				}
   145  				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
   146  					return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
   147  				}
   148  			}
   149  			return unsafe.Pointer(&zeroVal[0]), false
   150  		}
   151  		// long key, try not to do more comparisons than necessary
   152  		keymaybe := uintptr(abi.OldMapBucketCount)
   153  		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
   154  			k := (*stringStruct)(kptr)
   155  			if k.len != key.len || isEmpty(b.tophash[i]) {
   156  				if b.tophash[i] == emptyRest {
   157  					break
   158  				}
   159  				continue
   160  			}
   161  			if k.str == key.str {
   162  				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
   163  			}
   164  			// check first 4 bytes
   165  			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
   166  				continue
   167  			}
   168  			// check last 4 bytes
   169  			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
   170  				continue
   171  			}
   172  			if keymaybe != abi.OldMapBucketCount {
   173  				// Two keys are potential matches. Use hash to distinguish them.
   174  				goto dohash
   175  			}
   176  			keymaybe = i
   177  		}
   178  		if keymaybe != abi.OldMapBucketCount {
   179  			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
   180  			if memequal(k.str, key.str, uintptr(key.len)) {
   181  				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
   182  			}
   183  		}
   184  		return unsafe.Pointer(&zeroVal[0]), false
   185  	}
   186  dohash:
   187  	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
   188  	m := bucketMask(h.B)
   189  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
   190  	if c := h.oldbuckets; c != nil {
   191  		if !h.sameSizeGrow() {
   192  			// There used to be half as many buckets; mask down one more power of two.
   193  			m >>= 1
   194  		}
   195  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
   196  		if !evacuated(oldb) {
   197  			b = oldb
   198  		}
   199  	}
   200  	top := tophash(hash)
   201  	for ; b != nil; b = b.overflow(t) {
   202  		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
   203  			k := (*stringStruct)(kptr)
   204  			if k.len != key.len || b.tophash[i] != top {
   205  				continue
   206  			}
   207  			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
   208  				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
   209  			}
   210  		}
   211  	}
   212  	return unsafe.Pointer(&zeroVal[0]), false
   213  }
   214  
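Both access functions share the hashed path (lines 82-109 and 186-213): pick a bucket from the low bits of the hash, fall back to the not-yet-evacuated old bucket during a grow, then walk the overflow chain comparing each slot's cached tophash byte before ever touching the key itself. Below is a sketch in the spirit of the runtime's tophash helper; the constants and the 64-bit word size are restated here as assumptions of the example:

package main

import "fmt"

// The top 8 bits of a key's hash are cached per slot in b.tophash. Small
// values are reserved for slot states (empty, evacuated, ...), so real
// hashes are bumped up past minTopHash.
const (
	ptrSize    = 8 // assume a 64-bit platform for this sketch
	minTopHash = 5 // smallest tophash value for a normal filled cell
)

func topHash(hash uintptr) uint8 {
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func main() {
	fmt.Println(topHash(0xA1B2C3D4E5F60718)) // 161: top byte already above the reserved range
	fmt.Println(topHash(0x0000000000000001)) // 5: 0 is reserved, bumped to minTopHash
}
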
   215  // mapassign_faststr should be an internal detail,
   216  // but widely used packages access it using linkname.
   217  // Notable members of the hall of shame include:
   218  //   - github.com/bytedance/sonic
   219  //   - github.com/ugorji/go/codec
   220  //
   221  // Do not remove or change the type signature.
   222  // See go.dev/issue/67401.
   223  //
   224  //go:linkname mapassign_faststr
   225  func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
   226  	if h == nil {
   227  		panic(plainError("assignment to entry in nil map"))
   228  	}
   229  	if raceenabled {
   230  		callerpc := sys.GetCallerPC()
   231  		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
   232  	}
   233  	if h.flags&hashWriting != 0 {
   234  		fatal("concurrent map writes")
   235  	}
   236  	key := stringStructOf(&s)
   237  	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
   238  
   239  	// Set hashWriting after calling t.hasher for consistency with mapassign.
   240  	h.flags ^= hashWriting
   241  
   242  	if h.buckets == nil {
   243  		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
   244  	}
   245  
   246  again:
   247  	bucket := hash & bucketMask(h.B)
   248  	if h.growing() {
   249  		growWork_faststr(t, h, bucket)
   250  	}
   251  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
   252  	top := tophash(hash)
   253  
   254  	var insertb *bmap
   255  	var inserti uintptr
   256  	var insertk unsafe.Pointer
   257  
   258  bucketloop:
   259  	for {
   260  		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
   261  			if b.tophash[i] != top {
   262  				if isEmpty(b.tophash[i]) && insertb == nil {
   263  					insertb = b
   264  					inserti = i
   265  				}
   266  				if b.tophash[i] == emptyRest {
   267  					break bucketloop
   268  				}
   269  				continue
   270  			}
   271  			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
   272  			if k.len != key.len {
   273  				continue
   274  			}
   275  			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
   276  				continue
   277  			}
   278  			// already have a mapping for key. Update it.
   279  			inserti = i
   280  			insertb = b
   281  			// Overwrite existing key, so it can be garbage collected.
   282  			// The size is already guaranteed to be set correctly.
   283  			k.str = key.str
   284  			goto done
   285  		}
   286  		ovf := b.overflow(t)
   287  		if ovf == nil {
   288  			break
   289  		}
   290  		b = ovf
   291  	}
   292  
   293  	// Did not find mapping for key. Allocate new cell & add entry.
   294  
   295  	// If we hit the max load factor or we have too many overflow buckets,
   296  	// and we're not already in the middle of growing, start growing.
   297  	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
   298  		hashGrow(t, h)
   299  		goto again // Growing the table invalidates everything, so try again
   300  	}
   301  
   302  	if insertb == nil {
   303  		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
   304  		insertb = h.newoverflow(t, b)
   305  		inserti = 0 // not necessary, but avoids needlessly spilling inserti
   306  	}
   307  	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks
   308  
   309  	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
   310  	// store new key at insert position
   311  	*((*stringStruct)(insertk)) = *key
   312  	h.count++
   313  
   314  done:
   315  	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
   316  	if h.flags&hashWriting == 0 {
   317  		fatal("concurrent map writes")
   318  	}
   319  	h.flags &^= hashWriting
   320  	return elem
   321  }
   322  
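The bucketloop in mapassign_faststr (lines 258-291) does two jobs in one scan: look for an existing mapping to update, and remember the first empty cell in case the key turns out to be new. The sketch below mirrors just that scan over a single simplified bucket; the bucket type, slot states, and the findSlot name are inventions for illustration, and the real code additionally stops early at emptyRest and continues into overflow buckets:

package main

import "fmt"

const (
	bucketCnt = 8 // abi.OldMapBucketCount in the real code
	emptySlot = 0 // stand-in for the empty tophash states
)

type bucket struct {
	tophash [bucketCnt]uint8
	keys    [bucketCnt]string
}

// findSlot returns the index to use for key and whether key already exists.
func findSlot(b *bucket, top uint8, key string) (idx int, existed bool) {
	insert := -1
	for i := 0; i < bucketCnt; i++ {
		if b.tophash[i] != top {
			if b.tophash[i] == emptySlot && insert < 0 {
				insert = i // remember the first empty cell
			}
			continue
		}
		if b.keys[i] == key {
			return i, true // already have a mapping: update in place
		}
	}
	return insert, false // -1 would mean "bucket full, need an overflow bucket"
}

func main() {
	b := &bucket{}
	b.tophash[2], b.keys[2] = 7, "go"
	fmt.Println(findSlot(b, 7, "go")) // 2 true
	fmt.Println(findSlot(b, 9, "gc")) // 0 false
}
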
   323  func mapdelete_faststr(t *maptype, h *hmap, ky string) {
   324  	if raceenabled && h != nil {
   325  		callerpc := sys.GetCallerPC()
   326  		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
   327  	}
   328  	if h == nil || h.count == 0 {
   329  		return
   330  	}
   331  	if h.flags&hashWriting != 0 {
   332  		fatal("concurrent map writes")
   333  	}
   334  
   335  	key := stringStructOf(&ky)
   336  	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
   337  
   338  	// Set hashWriting after calling t.hasher for consistency with mapdelete
   339  	h.flags ^= hashWriting
   340  
   341  	bucket := hash & bucketMask(h.B)
   342  	if h.growing() {
   343  		growWork_faststr(t, h, bucket)
   344  	}
   345  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
   346  	bOrig := b
   347  	top := tophash(hash)
   348  search:
   349  	for ; b != nil; b = b.overflow(t) {
   350  		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
   351  			k := (*stringStruct)(kptr)
   352  			if k.len != key.len || b.tophash[i] != top {
   353  				continue
   354  			}
   355  			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
   356  				continue
   357  			}
   358  			// Clear key's pointer.
   359  			k.str = nil
   360  			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
   361  			if t.Elem.Pointers() {
   362  				memclrHasPointers(e, t.Elem.Size_)
   363  			} else {
   364  				memclrNoHeapPointers(e, t.Elem.Size_)
   365  			}
   366  			b.tophash[i] = emptyOne
   367  			// If the bucket now ends in a bunch of emptyOne states,
   368  			// change those to emptyRest states.
   369  			if i == abi.OldMapBucketCount-1 {
   370  				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
   371  					goto notLast
   372  				}
   373  			} else {
   374  				if b.tophash[i+1] != emptyRest {
   375  					goto notLast
   376  				}
   377  			}
   378  			for {
   379  				b.tophash[i] = emptyRest
   380  				if i == 0 {
   381  					if b == bOrig {
   382  						break // beginning of initial bucket, we're done.
   383  					}
   384  					// Find previous bucket, continue at its last entry.
   385  					c := b
   386  					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
   387  					}
   388  					i = abi.OldMapBucketCount - 1
   389  				} else {
   390  					i--
   391  				}
   392  				if b.tophash[i] != emptyOne {
   393  					break
   394  				}
   395  			}
   396  		notLast:
   397  			h.count--
   398  			// Reset the hash seed to make it more difficult for attackers to
   399  			// repeatedly trigger hash collisions. See issue 25237.
   400  			if h.count == 0 {
   401  				h.hash0 = uint32(rand())
   402  			}
   403  			break search
   404  		}
   405  	}
   406  
   407  	if h.flags&hashWriting == 0 {
   408  		fatal("concurrent map writes")
   409  	}
   410  	h.flags &^= hashWriting
   411  }
   412  
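After clearing a cell, mapdelete_faststr (lines 366-395) upgrades any run of emptyOne cells at the tail of the bucket chain to emptyRest, which is what lets the lookup loops above break out early instead of scanning every remaining slot. Below is a single-bucket sketch of that back-propagation; the markEmptyRest name is made up and the walk back through overflow buckets is omitted:

package main

import "fmt"

const (
	emptyRest = 0 // no entry here or in any later cell
	emptyOne  = 1 // no entry here, but there may be later ones
)

// markEmptyRest is called with the index of a cell that was just emptied.
func markEmptyRest(tophash []uint8, i int) {
	// Only worth doing if everything after i is already emptyRest.
	if i != len(tophash)-1 && tophash[i+1] != emptyRest {
		return
	}
	for i >= 0 && tophash[i] == emptyOne {
		tophash[i] = emptyRest
		i--
	}
}

func main() {
	th := []uint8{9, emptyOne, emptyOne, emptyRest}
	markEmptyRest(th, 2) // emptying slot 2 exposes a trailing empty run
	fmt.Println(th)      // [9 0 0 0]
}
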
   413  func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
   414  	// make sure we evacuate the oldbucket corresponding
   415  	// to the bucket we're about to use
   416  	evacuate_faststr(t, h, bucket&h.oldbucketmask())
   417  
   418  	// evacuate one more oldbucket to make progress on growing
   419  	if h.growing() {
   420  		evacuate_faststr(t, h, h.nevacuate)
   421  	}
   422  }
   423  
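growWork_faststr (lines 413-422) is what keeps growth incremental: a write that lands on a growing map evacuates the old bucket it is about to use plus at most one more, so no single operation rehashes the whole table and the old buckets drain after roughly one evacuation per old bucket. A toy model of that amortization, with made-up names and an arbitrary access pattern:

package main

import "fmt"

func main() {
	const nOld = 8
	evacuated := make([]bool, nOld)
	nevacuate := 0 // next not-yet-evacuated bucket, as in h.nevacuate

	evac := func(i int) {
		evacuated[i] = true
		for nevacuate < nOld && evacuated[nevacuate] {
			nevacuate++
		}
	}

	writes := 0
	for touched := 0; nevacuate < nOld; touched = (touched + 3) % nOld {
		evac(touched) // the old bucket this write hashes to
		if nevacuate < nOld {
			evac(nevacuate) // one extra bucket of forward progress
		}
		writes++
	}
	fmt.Println("old buckets drained after", writes, "writes")
}
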
   424  func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
   425  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
   426  	newbit := h.noldbuckets()
   427  	if !evacuated(b) {
   428  		// TODO: reuse overflow buckets instead of using new ones, if there
   429  		// is no iterator using the old buckets.  (If !oldIterator.)
   430  
   431  		// xy contains the x and y (low and high) evacuation destinations.
   432  		var xy [2]evacDst
   433  		x := &xy[0]
   434  		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
   435  		x.k = add(unsafe.Pointer(x.b), dataOffset)
   436  		x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize)
   437  
   438  		if !h.sameSizeGrow() {
   439  			// Only calculate y pointers if we're growing bigger.
   440  			// Otherwise GC can see bad pointers.
   441  			y := &xy[1]
   442  			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
   443  			y.k = add(unsafe.Pointer(y.b), dataOffset)
   444  			y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize)
   445  		}
   446  
   447  		for ; b != nil; b = b.overflow(t) {
   448  			k := add(unsafe.Pointer(b), dataOffset)
   449  			e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize)
   450  			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
   451  				top := b.tophash[i]
   452  				if isEmpty(top) {
   453  					b.tophash[i] = evacuatedEmpty
   454  					continue
   455  				}
   456  				if top < minTopHash {
   457  					throw("bad map state")
   458  				}
   459  				var useY uint8
   460  				if !h.sameSizeGrow() {
   461  					// Compute hash to make our evacuation decision (whether we need
   462  					// to send this key/elem to bucket x or bucket y).
   463  					hash := t.Hasher(k, uintptr(h.hash0))
   464  					if hash&newbit != 0 {
   465  						useY = 1
   466  					}
   467  				}
   468  
   469  				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
   470  				dst := &xy[useY]                 // evacuation destination
   471  
   472  				if dst.i == abi.OldMapBucketCount {
   473  					dst.b = h.newoverflow(t, dst.b)
   474  					dst.i = 0
   475  					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
   476  					dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize)
   477  				}
   478  				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
   479  
   480  				// Copy key.
   481  				*(*string)(dst.k) = *(*string)(k)
   482  
   483  				typedmemmove(t.Elem, dst.e, e)
   484  				dst.i++
   485  				// These updates might push these pointers past the end of the
   486  				// key or elem arrays.  That's ok, as we have the overflow pointer
   487  				// at the end of the bucket to protect against pointing past the
   488  				// end of the bucket.
   489  				dst.k = add(dst.k, 2*goarch.PtrSize)
   490  				dst.e = add(dst.e, uintptr(t.ValueSize))
   491  			}
   492  		}
   493  		// Unlink the overflow buckets & clear key/elem to help GC.
   494  		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
   495  			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
   496  			// Preserve b.tophash because the evacuation
   497  			// state is maintained there.
   498  			ptr := add(b, dataOffset)
   499  			n := uintptr(t.BucketSize) - dataOffset
   500  			memclrHasPointers(ptr, n)
   501  		}
   502  	}
   503  
   504  	if oldbucket == h.nevacuate {
   505  		advanceEvacuationMark(h, t, newbit)
   506  	}
   507  }
   508  
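During a size-doubling grow, evacuate_faststr splits each old bucket between two destinations: "x" keeps the same bucket index and "y" is that index plus newbit, and the choice (lines 459-467) comes down to the single hash bit that the larger bucket mask now exposes. A small worked example of that index arithmetic, with made-up values:

package main

import "fmt"

func main() {
	const (
		oldB   = 3                  // old table had 1<<3 = 8 buckets
		newbit = uintptr(1) << oldB // 8: number of old buckets
	)
	oldbucket := uintptr(5)

	for _, hash := range []uintptr{0x25, 0x2d} { // both land in old bucket 5
		dest := oldbucket
		if hash&newbit != 0 {
			dest += newbit // "y": upper half of the grown table
		}
		fmt.Printf("hash %#x: old bucket %d -> new bucket %d\n", hash, oldbucket, dest)
	}
	// Output:
	// hash 0x25: old bucket 5 -> new bucket 5
	// hash 0x2d: old bucket 5 -> new bucket 13
}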
