Source file src/internal/runtime/maps/runtime_fast32_swiss.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.swissmap
     6  
     7  package maps
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/race"
    12  	"internal/runtime/sys"
    13  	"unsafe"
    14  )
    15  
// runtime_mapaccess1_fast32 is the specialized lookup for maps with
// uint32 keys. It returns a pointer to the element for key, or a
// pointer to the shared zero value if key is not present. It never
// returns nil.
//
//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		// Report the generic accessor's PC so race reports are
		// consistent across the specialized access variants.
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Reads of a nil or empty map yield the zero value.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

	if m.dirLen == 0 {
		// Small map: a single group and no directory. Scan all
		// slots linearly, comparing keys directly — no hashing
		// needed.
		g := groupReference{
			data: m.dirPtr,
		}

		slotSize := typ.SlotSize
		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
			// A control byte with the high bit clear marks a full
			// slot; only then is the key comparison meaningful.
			if key == *(*uint32)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: those whose control byte matches the h2
		// bits of the hash.
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint32)(slotKey) {
				slotElem := g.elem(typ, i)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
    80  
// runtime_mapaccess2_fast32 is like runtime_mapaccess1_fast32 but also
// reports whether key was present, backing the two-result form of a
// map read (v, ok := m[k]) for uint32 keys. The returned pointer is
// never nil: it points at the shared zero value when ok is false.
//
//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		// Report the generic accessor's PC so race reports are
		// consistent across the specialized access variants.
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Reads of a nil or empty map yield the zero value, not present.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen == 0 {
		// Small map: a single group and no directory. Scan all
		// slots linearly, comparing keys directly — no hashing
		// needed.
		g := groupReference{
			data: m.dirPtr,
		}

		slotSize := typ.SlotSize
		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
			// A control byte with the high bit clear marks a full
			// slot; only then is the key comparison meaningful.
			if key == *(*uint32)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem, true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: those whose control byte matches the h2
		// bits of the hash.
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint32)(slotKey) {
				slotElem := g.elem(typ, i)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   145  
// putSlotSmallFast32 returns a pointer to the element slot for key in
// a small map (a single group, no directory), inserting the key if it
// is not already present. The caller has already verified there is
// room for another entry (m.used < abi.SwissMapGroupSlots) and holds
// the write flag; the caller stores the element value through the
// returned pointer.
func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*uint32)(slotKey) {
			// Key already present; reuse its element slot.
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	// Claim the slot: store the key, then publish it by setting the
	// control byte to the h2 of the hash.
	slotKey := g.key(typ, i)
	*(*uint32)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
   185  
// runtime_mapassign_fast32 is the specialized assignment for maps with
// uint32 keys. It returns a pointer to the element slot for key,
// inserting the key if it is not already present; the caller stores
// the element value through the returned pointer. Assigning to a nil
// map panics.
//
//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32
func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		// Report the generic assign's PC so race reports are
		// consistent across the specialized assign variants.
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		// First insert into an unallocated map: allocate the
		// single-group small map representation.
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			// Small map with room: take the simple single-group path.
			elem := m.putSlotSmallFast32(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint32)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint32)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room in this table: rehash and retry the insert
			// from the top (the directory may have changed).
			t.rehash(typ, m)
			continue outer
		}
	}

	// Clear the write flag; if it is already clear, another writer
	// raced with us.
	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   314  
// runtime_mapassign_fast32ptr is the specialized assignment for maps
// whose key is a 32-bit pointer (only called on 32-bit GOARCH). This
// source is identical to fast64ptr. It returns a pointer to the
// element slot for key, inserting the key if necessary; assigning to
// a nil map panics.
//
// TODO(prattmic): With some compiler refactoring we could avoid duplication of this function.
//
//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr
func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		// Report the generic assign's PC so race reports are
		// consistent across the specialized assign variants.
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		// First insert into an unallocated map: allocate the
		// single-group small map representation.
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			// Small map with room: take the simple single-group path.
			elem := m.putSlotSmallFastPtr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot we
		// find, which we'll use to insert the new entry if necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*unsafe.Pointer)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*unsafe.Pointer)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room in this table: rehash and retry the insert
			// from the top (the directory may have changed).
			t.rehash(typ, m)
			continue outer
		}
	}

	// Clear the write flag; if it is already clear, another writer
	// raced with us.
	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   446  
// runtime_mapdelete_fast32 is the specialized deletion for maps with
// uint32 keys. Deleting from a nil or empty map is a no-op. The key
// is forwarded by address to the generic Delete path.
//
//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32
func runtime_mapdelete_fast32(typ *abi.SwissMapType, m *Map, key uint32) {
	// NOTE(review): the race report fires even when m is nil, and it
	// reports runtime_mapassign's PC (deletes are treated as writes);
	// this mirrors the other specialized delete variants.
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
   461  

View as plain text