Source file src/internal/runtime/maps/runtime_faststr.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package maps
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goarch"
    10  	"internal/race"
    11  	"internal/runtime/sys"
    12  	"unsafe"
    13  )
    14  
// getWithoutKeySmallFastStr looks up key in a small map (single group,
// no directory; callers check m.dirLen <= 0 first) and returns a
// pointer to the element slot, or nil if the key is not present.
func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	ctrls := *g.ctrls()
	slotKey := g.key(typ, 0)
	slotSize := typ.SlotSize

	// The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight,
	// where there are 8 keys to check, all of which don't quick-match the lookup key.
	// In that case, we can save hashing the lookup key. That savings is worth this extra code
	// for strings that are long enough that hashing is expensive.
	if len(key) > 64 {
		// String hashing and equality might be expensive. Do a quick check first.
		j := abi.MapGroupSlots // sentinel meaning "no candidate found yet"
		for i := range abi.MapGroupSlots {
			// ctrls&(1<<7) == 0: high bit of the current (low) control
			// byte is clear, i.e. this slot is full, not empty/deleted.
			if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
				if j < abi.MapGroupSlots {
					// 2 strings both passed the quick equality test.
					// Break out of this loop and do it the slow way.
					goto dohash
				}
				j = i
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			ctrls >>= 8 // advance to the next slot's control byte
		}
		if j == abi.MapGroupSlots {
			// No slot passed the quick test.
			return nil
		}
		// There's exactly one slot that passed the quick test. Do the single expensive comparison.
		slotKey = g.key(typ, uintptr(j))
		if key == *(*string)(slotKey) {
			// Element lives right after the 2-word string key.
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		return nil
	}

dohash:
	// This path will cost 1 hash and 1+ε comparisons.
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
	h2 := uint8(h2(hash))
	ctrls = *g.ctrls()
	slotKey = g.key(typ, 0)

	for range abi.MapGroupSlots {
		// Match when the slot's control byte equals the hash's h2 and
		// the stored key compares equal.
		if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
		ctrls >>= 8
	}
	return nil
}
    71  
    72  // Returns true if a and b might be equal.
    73  // Returns false if a and b are definitely not equal.
    74  // Requires len(a)>=8.
    75  func longStringQuickEqualityTest(a, b string) bool {
    76  	if len(a) != len(b) {
    77  		return false
    78  	}
    79  	x, y := stringPtr(a), stringPtr(b)
    80  	// Check first 8 bytes.
    81  	if *(*[8]byte)(x) != *(*[8]byte)(y) {
    82  		return false
    83  	}
    84  	// Check last 8 bytes.
    85  	x = unsafe.Pointer(uintptr(x) + uintptr(len(a)) - 8)
    86  	y = unsafe.Pointer(uintptr(y) + uintptr(len(a)) - 8)
    87  	if *(*[8]byte)(x) != *(*[8]byte)(y) {
    88  		return false
    89  	}
    90  	return true
    91  }
    92  func stringPtr(s string) unsafe.Pointer {
    93  	type stringStruct struct {
    94  		ptr unsafe.Pointer
    95  		len int
    96  	}
    97  	return (*stringStruct)(unsafe.Pointer(&s)).ptr
    98  }
    99  
// runtime_mapaccess1_faststr is the string-key fast path for map reads
// (v := m[k]). It returns a pointer to the element, or a pointer to the
// type's zero value if the key is absent; it never returns nil.
//
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

	// Small maps (no directory) use a specialized lookup that can
	// sometimes skip hashing entirely.
	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	// Copy key so the hasher sees a local, non-escaping pointer.
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		// Check every slot in this group whose control byte matches h2.
		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				// Element lives right after the 2-word string key.
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
   158  
// runtime_mapaccess2_faststr is the string-key fast path for two-result
// map reads (v, ok := m[k]). It returns a pointer to the element and
// true, or a pointer to the type's zero value and false if the key is
// absent; the pointer is never nil.
//
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	// Small maps (no directory) use a specialized lookup that can
	// sometimes skip hashing entirely.
	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	// Copy key so the hasher sees a local, non-escaping pointer.
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		// Check every slot in this group whose control byte matches h2.
		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				// Element lives right after the 2-word string key.
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   217  
// putSlotSmallFastStr inserts or updates key in a small map (single
// group, no directory) and returns a pointer to the element slot. The
// caller has already computed hash and verified there is room
// (m.used < abi.MapGroupSlots).
func (m *Map) putSlotSmallFastStr(typ *abi.MapType, hash uintptr, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*string)(slotKey) {
			// Key needs update, as the backing storage may differ.
			*(*string)(slotKey) = key
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*string)(slotKey) = key

	slotElem := g.elem(typ, i)

	// Publish the new entry: record h2 in the control byte and bump the
	// element count.
	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
   259  
// runtime_mapassign_faststr is the string-key fast path for map writes
// (m[k] = v). It returns a pointer to the element slot into which the
// caller stores the value. Panics on assignment to a nil map.
//
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
func runtime_mapassign_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_faststr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	// Copy key so the hasher sees a local, non-escaping pointer.
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			// Small-map fast path: insert directly into the single group.
			elem := m.putSlotSmallFastStr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*string)(slotKey) {
					// Key needs update, as the backing
					// storage may differ.
					*(*string)(slotKey) = key
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*string)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// Still no room: rehash this table and retry the whole
			// probe from the (possibly new) directory entry.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   397  
   398  //go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
   399  func runtime_mapdelete_faststr(typ *abi.MapType, m *Map, key string) {
   400  	if race.Enabled {
   401  		callerpc := sys.GetCallerPC()
   402  		pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr)
   403  		race.WritePC(unsafe.Pointer(m), callerpc, pc)
   404  	}
   405  
   406  	if m == nil || m.Used() == 0 {
   407  		return
   408  	}
   409  
   410  	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
   411  }
   412  

View as plain text