Source file src/runtime/map_fast32_noswiss.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.swissmap

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/sys"
	"unsafe"
)

func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
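
// For orientation, a minimal sketch of the call sites the compiler
// generates for these fast paths (illustrative only; the lowering is
// performed by the compiler, not written by hand):
//
//	m := map[uint32]string{1: "one"}
//	s := m[1]      // lowered to a call of mapaccess1_fast32
//	s, ok := m[1]  // lowered to a call of mapaccess2_fast32 (below)
//
// Keys of other 4-byte integer types, such as int32, take the same path.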

// mapaccess2_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
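
// The linkname access pattern used by the packages listed above looks
// roughly like the sketch below (hypothetical caller-side code; the
// runtime-internal pointer types are replaced by unsafe.Pointer, and such
// packages typically also include an empty .s file so the compiler accepts
// the bodyless declaration):
//
//	import "unsafe" // for unsafe.Pointer and to enable go:linkname
//
//	//go:linkname mapaccess2_fast32 runtime.mapaccess2_fast32
//	func mapaccess2_fast32(t, h unsafe.Pointer, key uint32) (unsafe.Pointer, bool)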

// mapassign_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
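
// Schematically, the compiler lowers a 32-bit-key map assignment so that
// the runtime hands back an element slot and the generated code stores
// into it (a sketch, not literal generated code):
//
//	m[k] = v   // becomes roughly: *(*V)(mapassign_fast32(t, h, k)) = v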

// mapassign_fast32ptr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32ptr
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32ptr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
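
// This ptr variant exists because pointer keys must be stored with a write
// barrier. It is only reachable when pointers are 4 bytes wide, e.g. for a
// map[*T]V on a 32-bit platform; on 64-bit platforms pointer keys do not
// fit in 4 bytes and are handled by the fast64ptr variant instead.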

func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear the key if it contains pointers: that is possible
			// only when pointers are 32 bits wide, since a 64-bit pointer
			// cannot fit in a 32-bit key.
			if goarch.PtrSize == 4 && t.Key.Pointers() {
				// The key must itself be a pointer: pointers are 32 bits
				// wide here, and the key is exactly 32 bits wide too.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
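
// A worked illustration of the emptyRest coalescing above (assumed tophash
// values, for exposition only): suppose a bucket's tophash array reads
// [T0, T1, emptyRest, ...]. Deleting the entry in slot 1 first marks it
// emptyOne; because slot 2 is already emptyRest, the backward scan then
// promotes slot 1 to emptyRest as well, so later lookups can stop at
// slot 1 instead of walking the rest of the bucket and its overflow chain.
// The scan may also walk back across earlier buckets in the chain.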

func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}
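
// A small numeric sketch of the incremental growth above (assumed sizes):
// growing from h.B = 2 to h.B = 3 means 4 old buckets and 8 new ones, with
// oldbucketmask() == 3. A write that hashes to new bucket 6 first
// evacuates old bucket 6&3 == 2, the one its key may still live in, and
// then up to one more bucket at h.nevacuate, so growth completes after a
// bounded number of writes rather than in one large pause.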

func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*4)
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*4)
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
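
// A numeric sketch of the x/y split above (assumed sizes): doubling from 4
// to 8 buckets gives newbit == 4. A key that lived in old bucket 2 rehashes
// to new bucket 2 when hash&4 == 0 (destination x) or to new bucket 6 when
// hash&4 != 0 (destination y), because the new bucket index is the old
// index plus the hash's newly revealed top bit.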
