Source file src/runtime/map_fast64_noswiss.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.swissmap

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/sys"
	"unsafe"
)

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
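
// For maps whose key is a 64-bit scalar (e.g. map[uint64]T or map[int64]T),
// the compiler lowers a plain one-result read to this specialized routine
// instead of the generic mapaccess1. A rough sketch of the correspondence,
// with typeOf standing in for the compiler-generated *maptype descriptor
// (illustrative only, not literal compiler output):
//
//	var m map[uint64]string
//	v := m[42]
//	// is compiled roughly as:
//	//	v = *(*string)(mapaccess1_fast64(typeOf(m), m, 42))
//
// The returned pointer is always safe to dereference: a missing key yields
// &zeroVal[0] rather than nil.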

// mapaccess2_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
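
// The two-result read `v, ok := m[k]` is lowered to mapaccess2_fast64. The
// //go:linkname above also keeps the symbol reachable from outside the
// runtime; a hypothetical pull-style declaration is sketched below (the
// unsafe.Pointer stand-ins for *maptype and *hmap are the caller's own
// assumption, and this is exactly the kind of use the hall-of-shame comment
// discourages):
//
//	import "unsafe" // required for //go:linkname
//
//	//go:linkname mapaccess2_fast64 runtime.mapaccess2_fast64
//	func mapaccess2_fast64(t, h unsafe.Pointer, key uint64) (unsafe.Pointer, bool)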

// mapassign_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
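
// mapassign_fast64 only locates (or reserves) the element slot; the store of
// the value itself is emitted by the compiler at the call site. Roughly
// (illustrative only, with typeOf as above):
//
//	m[k] = v // m is a map[uint64]T
//	// is compiled roughly as:
//	//	*(*T)(mapassign_fast64(typeOf(m), m, k)) = v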

// mapassign_fast64ptr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64ptr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
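
// mapassign_fast64ptr handles the case where the 8-byte key is itself a
// pointer, so the key store above goes through *(*unsafe.Pointer) and is
// visible to the write barrier. An illustrative (simplified) situation in
// which the compiler would pick this variant on a 64-bit target:
//
//	type node struct{ next *node }
//	m := make(map[*node]int) // pointer key, 8 bytes on 64-bit targets
//	n := &node{}
//	m[n] = 1 // assignment lowered to mapassign_fast64ptr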

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.Pointers() {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
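
// delete(m, k) on a 64-bit-keyed map is lowered to this routine. A deleted
// slot is first marked emptyOne and only promoted to emptyRest when every
// later slot in the same bucket chain is already empty; the backward scan
// above maintains that invariant so that scans such as the bucketloop in
// mapassign_fast64 can stop early. Illustrative call shape (simplified):
//
//	delete(m, k) // m is a map[uint64]T
//	// is compiled roughly as:
//	//	mapdelete_fast64(typeOf(m), m, k)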

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
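
// Growth is incremental: a write that arrives while a grow is in progress
// evacuates at most two old buckets (the one about to be used, plus one more
// at h.nevacuate), so the cost of doubling the table is amortized over later
// writes. The old bucket index is recovered by masking; for example, when B
// grows from 2 to 3 (4 -> 8 buckets):
//
//	h.oldbucketmask() == 3        // old table had 4 buckets
//	bucket == 6                   // new-table bucket about to be written
//	bucket&h.oldbucketmask() == 2 // its entries all come from old bucket 2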

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*8)
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*8)
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.Key.Pointers() && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
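
// During a size-doubling grow, each old bucket splits into two destinations:
// X keeps the same index, Y is offset by newbit (the old bucket count). The
// choice depends only on the hash bit that has just become significant. For
// example, when growing from B=2 to B=3:
//
//	newbit == 4                  // old table had 4 buckets
//	hash&3 == 2                  // key lived in old bucket 2
//	hash&newbit == 0 -> bucket 2 // X destination
//	hash&newbit != 0 -> bucket 6 // Y destination (2 + newbit)
//
// A same-size grow has no Y half: every entry re-lands at its old index and
// only the overflow chains get compacted.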