Source file src/internal/runtime/maps/runtime_fast64.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package maps

import (
	"internal/abi"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

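// This file contains the fast-path map routines for maps whose keys are
// 8 bytes wide: uint64, int64, and (via the ptr variant, on 64-bit
// platforms) pointer keys. The compiler lowers map operations on such
// key types to the linknamed functions below; roughly, for m of type
// map[uint64]V:
//
//	v := m[k]      // runtime.mapaccess1_fast64
//	v, ok := m[k]  // runtime.mapaccess2_fast64
//	m[k] = v       // runtime.mapassign_fast64
//	delete(m, k)   // runtime.mapdelete_fast64
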
//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

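	// No directory allocated: the map is still small, a single group
	// with no tombstones, so scan its slots directly without computing
	// the hash.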
	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
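		// Walk the group's slots in order: the lowest bit of full
		// corresponds to the current slot, and for 8-byte keys the
		// elem lives 8 bytes into the slot, directly after the key.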
		for full != 0 {
			if key == *(*uint64)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0])
	}

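	// Hash the key once; h1 drives the probe sequence within a table,
	// while h2 (7 bits of the hash) is the fingerprint compared against
	// the group control bytes.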
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

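// runtime_mapaccess2_fast64 differs from runtime_mapaccess1_fast64 only
// in also reporting whether the key was present.
//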
//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint64)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem, true
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

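// putSlotSmallFast64 inserts key into a small (single-group) map and
// returns a pointer to the new elem slot. The caller must have verified
// that a free slot exists.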
func (m *Map) putSlotSmallFast64(typ *abi.MapType, hash uintptr, key uint64) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*uint64)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*uint64)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64
func runtime_mapassign_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast64)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFast64(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

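	// Full-size map: select the table from the directory and probe it
	// group by group. If the table must be rehashed (grown or split),
	// restart the probe from the top of the outer loop.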
	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint64)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint64)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

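	// Re-check the writing bit: if it was cleared while we held it,
	// another goroutine wrote to the map concurrently.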
	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

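// putSlotSmallFastPtr is the pointer-key counterpart of
// putSlotSmallFast64. The key is written through an *unsafe.Pointer so
// the store is a pointer write, which keeps the key visible to the
// garbage collector.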
func (m *Map) putSlotSmallFastPtr(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*unsafe.Pointer)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*unsafe.Pointer)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

// Key is a 64-bit pointer (only called on 64-bit GOARCH).
//
//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr
func runtime_mapassign_fast64ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast64ptr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFastPtr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

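	// Same probe-and-insert loop as runtime_mapassign_fast64, but with
	// pointer-typed key loads and stores.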
	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*unsafe.Pointer)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*unsafe.Pointer)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

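// There is no specialized delete path for 64-bit keys; deletion
// forwards to the generic Map.Delete.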
//go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64
func runtime_mapdelete_fast64(typ *abi.MapType, m *Map, key uint64) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
