Source file src/internal/runtime/maps/runtime_fast64_swiss.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.swissmap
     6  
     7  package maps
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/race"
    12  	"internal/runtime/sys"
    13  	"unsafe"
    14  )
    15  
// runtime_mapaccess1_fast64 is the uint64-key specialization of one-result
// map lookup (elem = m[key]). It returns a pointer to the element slot for
// key, or a pointer to the zero value if key is not present. It never
// returns nil.
//
//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Reads of a nil or empty map yield the zero value.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

	if m.dirLen == 0 {
		// Small map: no directory, a single group holds all entries.
		g := groupReference{
			data: m.dirPtr,
		}

		// Linear scan of the group's slots. A slot matches only if its
		// key equals key and its control byte has the high bit clear
		// (control bytes with the high bit set mark unused slots).
		slotSize := typ.SlotSize
		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
			if key == *(*uint64)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: those whose control byte matches h2 of the
		// hash. Each candidate still needs a full key comparison.
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := g.elem(typ, i)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
    80  
// runtime_mapaccess2_fast64 is the uint64-key specialization of two-result
// map lookup (elem, ok = m[key]). It returns a pointer to the element slot
// and true if key is present, or a pointer to the zero value and false if
// not. The pointer result is never nil.
//
//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Reads of a nil or empty map yield the zero value.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen == 0 {
		// Small map: no directory, a single group holds all entries.
		g := groupReference{
			data: m.dirPtr,
		}
		// Linear scan of the group's slots. A slot matches only if its
		// key equals key and its control byte has the high bit clear
		// (control bytes with the high bit set mark unused slots).
		slotSize := typ.SlotSize
		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
			if key == *(*uint64)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem, true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: those whose control byte matches h2 of the
		// hash. Each candidate still needs a full key comparison.
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := g.elem(typ, i)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   144  
   145  func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) unsafe.Pointer {
   146  	g := groupReference{
   147  		data: m.dirPtr,
   148  	}
   149  
   150  	match := g.ctrls().matchH2(h2(hash))
   151  
   152  	// Look for an existing slot containing this key.
   153  	for match != 0 {
   154  		i := match.first()
   155  
   156  		slotKey := g.key(typ, i)
   157  		if key == *(*uint64)(slotKey) {
   158  			slotElem := g.elem(typ, i)
   159  			return slotElem
   160  		}
   161  		match = match.removeFirst()
   162  	}
   163  
   164  	// There can't be deleted slots, small maps can't have them
   165  	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
   166  	// more efficient than matchEmpty.
   167  	match = g.ctrls().matchEmptyOrDeleted()
   168  	if match == 0 {
   169  		fatal("small map with no empty slot (concurrent map writes?)")
   170  	}
   171  
   172  	i := match.first()
   173  
   174  	slotKey := g.key(typ, i)
   175  	*(*uint64)(slotKey) = key
   176  
   177  	slotElem := g.elem(typ, i)
   178  
   179  	g.ctrls().set(i, ctrl(h2(hash)))
   180  	m.used++
   181  
   182  	return slotElem
   183  }
   184  
// runtime_mapassign_fast64 is the uint64-key specialization of map assign
// (m[key] = elem). It returns a pointer to the element slot into which the
// caller stores the value. Assigning to a nil map panics.
//
//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64
func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		// First write to a fresh map: allocate the initial small group.
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		// Small map (single group, no directory).
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmallFast64(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint64)(slotKey) {
					// Key already present; reuse its element slot.
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint64)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room in this table: rehash it and retry the insert
			// from table selection (the directory may have changed).
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   313  
   314  func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
   315  	g := groupReference{
   316  		data: m.dirPtr,
   317  	}
   318  
   319  	match := g.ctrls().matchH2(h2(hash))
   320  
   321  	// Look for an existing slot containing this key.
   322  	for match != 0 {
   323  		i := match.first()
   324  
   325  		slotKey := g.key(typ, i)
   326  		if key == *(*unsafe.Pointer)(slotKey) {
   327  			slotElem := g.elem(typ, i)
   328  			return slotElem
   329  		}
   330  		match = match.removeFirst()
   331  	}
   332  
   333  	// There can't be deleted slots, small maps can't have them
   334  	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
   335  	// more efficient than matchEmpty.
   336  	match = g.ctrls().matchEmptyOrDeleted()
   337  	if match == 0 {
   338  		fatal("small map with no empty slot (concurrent map writes?)")
   339  	}
   340  
   341  	i := match.first()
   342  
   343  	slotKey := g.key(typ, i)
   344  	*(*unsafe.Pointer)(slotKey) = key
   345  
   346  	slotElem := g.elem(typ, i)
   347  
   348  	g.ctrls().set(i, ctrl(h2(hash)))
   349  	m.used++
   350  
   351  	return slotElem
   352  }
   353  
// runtime_mapassign_fast64ptr is the pointer-key specialization of map
// assign (m[key] = elem). Key is a 64-bit pointer (only called on 64-bit
// GOARCH). It returns a pointer to the element slot into which the caller
// stores the value. Assigning to a nil map panics.
//
//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr
func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		// First write to a fresh map: allocate the initial small group.
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		// Small map (single group, no directory).
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmallFastPtr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*unsafe.Pointer)(slotKey) {
					// Key already present; reuse its element slot.
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*unsafe.Pointer)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room in this table: rehash it and retry the insert
			// from table selection (the directory may have changed).
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   484  
   485  //go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64
   486  func runtime_mapdelete_fast64(typ *abi.SwissMapType, m *Map, key uint64) {
   487  	if race.Enabled {
   488  		callerpc := sys.GetCallerPC()
   489  		pc := abi.FuncPCABIInternal(runtime_mapassign)
   490  		race.WritePC(unsafe.Pointer(m), callerpc, pc)
   491  	}
   492  
   493  	if m == nil || m.Used() == 0 {
   494  		return
   495  	}
   496  
   497  	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
   498  }
   499  

View as plain text