Source file src/internal/runtime/maps/runtime_faststr_swiss.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.swissmap
     6  
     7  package maps
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/race"
    12  	"internal/runtime/sys"
    13  	"unsafe"
    14  )
    15  
    16  // TODO: more string-specific optimizations possible.
    17  
    18  func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) (unsafe.Pointer, bool) {
    19  	g := groupReference{
    20  		data: m.dirPtr,
    21  	}
    22  
    23  	h2 := uint8(h2(hash))
    24  	ctrls := *g.ctrls()
    25  
    26  	for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
    27  		c := uint8(ctrls)
    28  		ctrls >>= 8
    29  		if c != h2 {
    30  			continue
    31  		}
    32  
    33  		slotKey := g.key(typ, i)
    34  
    35  		if key == *(*string)(slotKey) {
    36  			slotElem := g.elem(typ, i)
    37  			return slotElem, true
    38  		}
    39  	}
    40  
    41  	return nil, false
    42  }
    43  
// runtime_mapaccess1_faststr is the string-key fast path for one-result
// map reads (v := m[k]). It returns a pointer to the element if present,
// or a pointer to the type's zero value otherwise; it never returns nil
// on a normal path.
//
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		// The generic access function's PC is reported to the race
		// detector for all access variants.
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Nil or empty map: missing key, return the shared zero value.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil // unreachable; fatal does not return
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Small map (no directory): scan the single group directly.
	if m.dirLen <= 0 {
		elem, ok := m.getWithoutKeySmallFastStr(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: control bytes matching the hash's h2.
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				slotElem := g.elem(typ, i)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
   101  
// runtime_mapaccess2_faststr is the string-key fast path for two-result
// map reads (v, ok := m[k]). It returns a pointer to the element and
// true if the key is present, or a pointer to the type's zero value and
// false otherwise.
//
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		// The generic access function's PC is reported to the race
		// detector for all access variants.
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// Nil or empty map: missing key.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false // unreachable; fatal does not return
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Small map (no directory): scan the single group directly.
	if m.dirLen <= 0 {
		elem, ok := m.getWithoutKeySmallFastStr(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: control bytes matching the hash's h2.
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				slotElem := g.elem(typ, i)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   159  
// putSlotSmallFastStr inserts or updates key in a small map (single
// group, no directory) and returns a pointer to the element slot for the
// caller to fill in. The caller has already verified there is room
// (m.used < abi.SwissMapGroupSlots) and marked the map as writing.
func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*string)(slotKey) {
			// Key needs update, as the backing storage may differ.
			*(*string)(slotKey) = key
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*string)(slotKey) = key

	slotElem := g.elem(typ, i)

	// Publish the slot by writing its control byte, then account for it.
	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
   201  
// runtime_mapassign_faststr is the string-key fast path for map writes
// (m[k] = v). It returns a pointer to the element slot for the compiler
// to store the value into, growing the map if necessary. Assigning to a
// nil map panics.
//
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		// The generic assign function's PC is reported to the race
		// detector for all assign variants.
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	// First write to a fresh map: allocate the small-map group.
	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	// Small map (no directory) path.
	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmallFastStr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	// Each iteration retries the insert from scratch; a rehash below
	// restarts the loop because the table layout has changed.
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*string)(slotKey) {
					// Key needs update, as the backing
					// storage may differ.
					*(*string)(slotKey) = key
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*string)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room: rehash this table and retry the whole insert.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   333  
// runtime_mapdelete_faststr is the string-key fast path for
// delete(m, k). Deleting from a nil or empty map is a no-op; otherwise
// it defers to the generic Map.Delete.
//
//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		// The generic assign function's PC is reported to the race
		// detector for delete as well.
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
   348  

View as plain text