Source file src/internal/runtime/maps/map.go
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package maps implements Go's builtin map type.
package maps

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

// This package contains the implementation of Go's builtin map type.
//
// The map design is based on Abseil's "Swiss Table" map design
// (https://abseil.io/about/design/swisstables), with additional modifications
// to cover Go's additional requirements, discussed below.
//
// Terminology:
//   - Slot: A storage location of a single key/element pair.
//   - Group: A group of abi.MapGroupSlots (8) slots, plus a control word.
//   - Control word: An 8-byte word which denotes whether each slot is empty,
//     deleted, or used. If a slot is used, its control byte also contains the
//     lower 7 bits of the hash (H2).
//   - H1: Upper 57 bits of a hash.
//   - H2: Lower 7 bits of a hash.
//   - Table: A complete "Swiss Table" hash table. A table consists of one or
//     more groups for storage plus metadata to handle operations and to
//     determine when to grow.
//   - Map: The top-level Map type consists of zero or more tables for storage.
//     The upper bits of the hash select which table a key belongs to.
//   - Directory: Array of the tables used by the map.
//
// At its core, the table design is similar to a traditional open-addressed
// hash table. Storage consists of an array of groups, which effectively means
// an array of key/elem slots with some control words interspersed. Lookup uses
// the hash to determine an initial group to check. If, due to collisions, this
// group contains no match, the probe sequence selects the next group to check
// (see below for more detail about the probe sequence).
//
// The key difference occurs within a group. In a standard open-addressed
// linear probed hash table, we would check each slot one at a time to find a
// match. A swiss table utilizes the extra control word to check all 8 slots in
// parallel.
//
// Each byte in the control word corresponds to one of the slots in the group.
// In each byte, 1 bit is used to indicate whether the slot is in use, or if it
// is empty/deleted. The other 7 bits contain the lower 7 bits of the hash for
// the key in that slot. See [ctrl] for the exact encoding.
//
// During lookup, we can use some clever bitwise manipulation to compare all 8
// 7-bit hashes against the input hash in parallel (see [ctrlGroup.matchH2]).
// That is, we effectively perform 8 steps of probing in a single operation.
// With SIMD instructions, this could be extended to 16 slots with a 16-byte
// control word.
//
// Since we only use 7 bits of the 64-bit hash, there is a 1 in 128 (~0.7%)
// probability of a false positive on each slot, but that's fine: we always
// need to double-check each match with a standard key comparison regardless.
//
// Probing
//
// Probing is done using the upper 57 bits (H1) of the hash as an index into
// the groups array. Probing walks through the groups using quadratic probing
// until it finds a group with a match or a group with an empty slot. See
// [probeSeq] for specifics about the probe sequence.
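//
// As a concrete (illustrative) example of the hash split, using the h1 and h2
// helpers defined below: a hash h = 0x1234567f yields
//
//	h2(h) = h & 0x7f = 0x7f     (stored in the control byte of a used slot)
//	h1(h) = h >> 7   = 0x2468ac (used to select the starting group)
//
// The exact mapping from H1 to a starting group is defined by [probeSeq].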
// Note the probe invariants: the number of groups must be a power of two, and
// the end of a probe sequence must be a group with an empty slot (the table
// can never be 100% full).
//
// Deletion
//
// Probing stops when it finds a group with an empty slot. This affects
// deletion: when deleting from a completely full group, we must not mark the
// slot as empty, as there could be more slots used later in a probe sequence
// and this deletion would cause probing to stop too early. Instead, we mark
// such slots as "deleted" with a tombstone. If the group still has an empty
// slot, we don't need a tombstone and directly mark the slot empty. Insert
// prioritizes reuse of tombstones over filling an empty slot. Otherwise,
// tombstones are only completely cleared during grow, as an in-place cleanup
// complicates iteration.
//
// Growth
//
// The probe sequence depends on the number of groups. Thus, when growing the
// group count, all slots must be reordered to match the new probe sequence. In
// other words, an entire table must be grown at once.
//
// In order to support incremental growth, the map splits its contents across
// multiple tables. Each table is still a full hash table, but an individual
// table may only service a subset of the hash space. Growth occurs on
// individual tables, so while an entire table must grow at once, each of these
// grows covers only a small portion of the map. The maximum size of a single
// grow is limited by capping the maximum size of a table before it is split
// into multiple tables.
//
// A map starts with a single table. Up to [maxTableCapacity], growth simply
// replaces this table with one of double the capacity. Beyond this limit,
// growth splits the table into two.
//
// The map uses "extendible hashing" to select which table to use. In
// extendible hashing, we use the upper bits of the hash as an index into an
// array of tables (called the "directory"). The number of bits used increases
// as the number of tables increases. For example, when there is only 1 table,
// we use 0 bits (no selection necessary). When there are 2 tables, we use 1
// bit to select either the 0th or 1st table. [Map.globalDepth] is the number
// of bits currently used for table selection, and by extension (1 <<
// globalDepth), the size of the directory.
//
// Note that each table has its own load factor and grows independently. If the
// 1st table grows, it will split. We'll then need 2 bits to select tables,
// though we'll have 3 tables total rather than 4. We support this by allowing
// multiple indices to point to the same table. For example:
//
//	directory (globalDepth=2)
//	+----+
//	| 00 | --\
//	+----+    +--> table (localDepth=1)
//	| 01 | --/
//	+----+
//	| 10 | ------> table (localDepth=2)
//	+----+
//	| 11 | ------> table (localDepth=2)
//	+----+
//
// Tables track the depth they were created at (localDepth). It is necessary to
// grow the directory when splitting a table where globalDepth == localDepth.
//
// Iteration
//
// Iteration is the most complex part of the map due to Go's generous iteration
// semantics. A summary of semantics from the spec:
//  1. Adding and/or deleting entries during iteration MUST NOT cause iteration
//     to return the same entry more than once.
//  2. Entries added during iteration MAY be returned by iteration.
//  3. Entries modified during iteration MUST return their latest value.
//  4. Entries deleted during iteration MUST NOT be returned by iteration.
//  5. Iteration order is unspecified. In the implementation, it is explicitly
//     randomized.
//
// If the map never grows, these semantics are straightforward: just iterate
// over every table in the directory and every group and slot in each table.
// All of the semantics above hold as expected.
//
// If the map grows during iteration, things get significantly more
// complicated. First and foremost, we need to track which entries we already
// returned to satisfy (1). There are three types of grow:
//  a. A table replaced by a single larger table.
//  b. A table split into two replacement tables.
//  c. Growing the directory (occurs as part of (b) if necessary).
//
// For all of these cases, the replacement table(s) will have a different probe
// sequence, so simply tracking the current group and slot indices is not
// sufficient.
//
// For (a) and (b), note that grows of tables other than the one we are
// currently iterating over are irrelevant.
//
// We handle (a) and (b) by having the iterator keep a reference to the table
// it is currently iterating over, even after the table is replaced. We keep
// iterating over the original table to maintain the iteration order and avoid
// violating (1). Any new entries added only to the replacement table(s) will
// be skipped (allowed by (2)). To avoid violating (3) or (4), while we use the
// original table to select the keys, we must look them up again in the new
// table(s) to determine if they have been modified or deleted. There is yet
// another layer of complexity if the key does not compare equal to itself
// (e.g., NaN keys). See [Iter.Next] for the gory details.
//
// Note that for (b), once we finish iterating over the old table, we'll need
// to skip the next entry in the directory, as that contains the second split
// of the old table. We can use the old table's localDepth to determine the
// next logical index to use.
//
// For (c), we must adjust the current directory index when the directory
// grows. This is more straightforward, as the directory order remains the
// same after grow, so we just double the index if the directory size doubles.

// Extracts the H1 portion of a hash: the upper 57 bits.
// TODO(prattmic): what about 32-bit systems?
func h1(h uintptr) uintptr {
	return h >> 7
}

// Extracts the H2 portion of a hash: the 7 bits not used for h1.
//
// These are used as an occupied control byte.
func h2(h uintptr) uintptr {
	return h & 0x7f
}

// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:MapType.
type Map struct {
	// The number of filled slots (i.e. the number of elements in all
	// tables). Excludes deleted slots.
	// Must be first (known by the compiler, for the len() builtin).
	used uint64

	// seed is the hash seed, computed as a unique random number per map.
	seed uintptr

	// The directory of tables.
	//
	// Normally dirPtr points to an array of table pointers:
	//
	//	dirPtr *[dirLen]*table
	//
	// The length (dirLen) of this array is `1 << globalDepth`. Multiple
	// entries may point to the same table. See the top-level comment for
	// more details.
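	//
	// For illustration (assuming 64-bit hashes): with globalDepth == 2,
	// the directory holds 1<<2 == 4 entries and globalShift == 62, so
	// directoryIndex selects a table using hash >> 62, i.e. the top two
	// bits of the hash.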
	//
	// Small map optimization: if the map has always contained
	// abi.MapGroupSlots or fewer entries, it fits entirely in a
	// single group. In that case dirPtr points directly to a single group.
	//
	//	dirPtr *group
	//
	// In this case, dirLen is 0. used counts the number of used slots in
	// the group. Note that small maps never have deleted slots (as there
	// is no probe sequence to maintain).
	dirPtr unsafe.Pointer
	dirLen int

	// The number of bits to use in table directory lookups.
	globalDepth uint8

	// The number of bits to shift out of the hash for directory lookups.
	// On 64-bit systems, this is 64 - globalDepth.
	globalShift uint8

	// writing is a flag that is toggled (XOR 1) while the map is being
	// written. Normally it is set to 1 when writing, but if there are
	// multiple concurrent writers, then toggling increases the probability
	// that both sides will detect the race.
	writing uint8

	// tombstonePossible is false if we know that no table in this map
	// contains a tombstone.
	tombstonePossible bool

	// clearSeq is a sequence counter of calls to Clear. It is used to
	// detect map clears during iteration.
	clearSeq uint64
}

// Use a 64-bit hash on 64-bit systems, except on Wasm, where we use a
// 32-bit hash (see runtime/hash32.go).
const Use64BitHash = goarch.PtrSize == 8 && goarch.IsWasm == 0

func depthToShift(depth uint8) uint8 {
	if !Use64BitHash {
		return 32 - depth
	}
	return 64 - depth
}

// NewMap creates a new map, sized to hold hint entries.
//
// If m is non-nil, it should be used rather than allocating.
//
// maxAlloc should be runtime.maxAlloc.
//
// TODO(prattmic): Put maxAlloc somewhere accessible.
func NewMap(mt *abi.MapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
	if m == nil {
		m = new(Map)
	}

	m.seed = uintptr(rand())

	if hint <= abi.MapGroupSlots {
		// A small map can fill all 8 slots, so no need to increase
		// target capacity.
		//
		// In fact, since an 8-slot group is what the first assignment
		// to an empty map would allocate anyway, it doesn't matter if
		// we allocate here or on the first assignment.
		//
		// Thus we just return without allocating. (We'll save the
		// allocation completely if no assignment comes.)

		// Note that the compiler may have initialized m.dirPtr with a
		// pointer to a stack-allocated group, in which case we already
		// have a group. The control word is already initialized.

		return m
	}

	// Full-size map.

	// Set initial capacity to hold hint entries without growing in the
	// average case.
	targetCapacity := (hint * abi.MapGroupSlots) / maxAvgGroupLoad
	if targetCapacity < hint { // overflow
		return m // return an empty map.
	}

	dirSize := (uint64(targetCapacity) + maxTableCapacity - 1) / maxTableCapacity
	dirSize, overflow := alignUpPow2(dirSize)
	if overflow || dirSize > uint64(math.MaxUintptr) {
		return m // return an empty map.
	}

	// Reject hints that are obviously too large.
	groups, overflow := math.MulUintptr(uintptr(dirSize), maxTableCapacity)
	if overflow {
		return m // return an empty map.
	} else {
		mem, overflow := math.MulUintptr(groups, mt.GroupSize)
		if overflow || mem > maxAlloc {
			return m // return an empty map.
		}
	}

	m.globalDepth = uint8(sys.TrailingZeros64(dirSize))
	m.globalShift = depthToShift(m.globalDepth)

	directory := make([]*table, dirSize)

	for i := range directory {
		// TODO: Think more about initial table capacity.
		directory[i] = newTable(mt, uint64(targetCapacity)/dirSize, i, m.globalDepth)
	}

	m.dirPtr = unsafe.Pointer(&directory[0])
	m.dirLen = len(directory)

	return m
}

func NewEmptyMap() *Map {
	m := new(Map)
	m.seed = uintptr(rand())
	// See the comment in NewMap. No need to eagerly allocate a group.
	return m
}

func (m *Map) directoryIndex(hash uintptr) uintptr {
	if m.dirLen == 1 {
		return 0
	}
	return hash >> (m.globalShift & 63)
}

func (m *Map) directoryAt(i uintptr) *table {
	return *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i))
}

func (m *Map) directorySet(i uintptr, nt *table) {
	*(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i)) = nt
}

func (m *Map) replaceTable(nt *table) {
	// The number of entries that reference the same table doubles each
	// time globalDepth grows without the table splitting.
	entries := 1 << (m.globalDepth - nt.localDepth)
	for i := 0; i < entries; i++ {
		//m.directory[nt.index+i] = nt
		m.directorySet(uintptr(nt.index+i), nt)
	}
}

func (m *Map) installTableSplit(old, left, right *table) {
	if old.localDepth == m.globalDepth {
		// No room for another level in the directory. Grow the
		// directory.
		newDir := make([]*table, m.dirLen*2)
		for i := range m.dirLen {
			t := m.directoryAt(uintptr(i))
			newDir[2*i] = t
			newDir[2*i+1] = t
			// t may already be present at multiple indices. We
			// should only update t.index once. Since the index
			// must increase, seeing the original index means this
			// must be the first time we've encountered this table.
			if t.index == i {
				t.index = 2 * i
			}
		}
		m.globalDepth++
		m.globalShift--
		//m.directory = newDir
		m.dirPtr = unsafe.Pointer(&newDir[0])
		m.dirLen = len(newDir)
	}

	// N.B. left and right may still consume multiple indices if the
	// directory has grown multiple times since old was last split.
	left.index = old.index
	m.replaceTable(left)

	entries := 1 << (m.globalDepth - left.localDepth)
	right.index = left.index + entries
	m.replaceTable(right)
}

func (m *Map) Used() uint64 {
	return m.used
}

// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
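//
// For illustration only, a hypothetical caller holding a key value k of the
// map's key type would use it roughly like:
//
//	elem, ok := m.Get(typ, unsafe.Pointer(&k))
//	if ok {
//		// elem points to the element stored for k.
//	}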
func (m *Map) Get(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
	return m.getWithoutKey(typ, key)
}

func (m *Map) getWithKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	if m.Used() == 0 {
		return nil, nil, false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	if m.dirLen == 0 {
		return m.getWithKeySmall(typ, hash, key)
	}

	idx := m.directoryIndex(hash)
	return m.directoryAt(idx).getWithKey(typ, hash, key)
}

func (m *Map) getWithoutKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if m.Used() == 0 {
		return nil, false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	if m.dirLen == 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		return elem, ok
	}

	idx := m.directoryIndex(hash)
	return m.directoryAt(idx).getWithoutKey(typ, hash, key)
}

func (m *Map) getWithKeySmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}

		if typ.Key.Equal(key, slotKey) {
			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				slotElem = *((*unsafe.Pointer)(slotElem))
			}
			return slotKey, slotElem, true
		}

		match = match.removeFirst()
	}

	// No match here means key is not in the map.
	// (A single group means no need to probe or check for empty).
	return nil, nil, false
}

func (m *Map) Put(typ *abi.MapType, key, elem unsafe.Pointer) {
	slotElem := m.PutSlot(typ, key)
	typedmemmove(typ.Elem, slotElem, elem)
}

// PutSlot returns a pointer to the element slot where an inserted element
// should be written.
//
// PutSlot never returns nil.
func (m *Map) PutSlot(typ *abi.MapType, key unsafe.Pointer) unsafe.Pointer {
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmall(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		//
		// TODO(prattmic): If this is an update to an existing key then
		// we actually don't need to grow.
		m.growToTable(typ)
	}

	for {
		idx := m.directoryIndex(hash)
		elem, ok := m.directoryAt(idx).PutSlot(typ, m, hash, key)
		if !ok {
			continue
		}

		if m.writing == 0 {
			fatal("concurrent map writes")
		}
		m.writing ^= 1

		return elem
	}
}

func (m *Map) putSlotSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
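	// (Note: matchH2 reports, as a bitset, every slot whose control byte
	// matches h2(hash). Several of the 8 slots may share the same 7-bit
	// H2, so each candidate is confirmed with a full key comparison
	// below.)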
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}
		if typ.Key.Equal(key, slotKey) {
			if typ.NeedKeyUpdate() {
				typedmemmove(typ.Key, slotKey, key)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				slotElem = *((*unsafe.Pointer)(slotElem))
			}

			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
		return nil
	}

	i := match.first()

	slotKey := g.key(typ, i)
	if typ.IndirectKey() {
		kmem := newobject(typ.Key)
		*(*unsafe.Pointer)(slotKey) = kmem
		slotKey = kmem
	}
	typedmemmove(typ.Key, slotKey, key)

	slotElem := g.elem(typ, i)
	if typ.IndirectElem() {
		emem := newobject(typ.Elem)
		*(*unsafe.Pointer)(slotElem) = emem
		slotElem = emem
	}

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

func (m *Map) growToSmall(typ *abi.MapType) {
	grp := newGroups(typ, 1)
	m.dirPtr = grp.data

	g := groupReference{
		data: m.dirPtr,
	}
	g.ctrls().setEmpty()
}

func (m *Map) growToTable(typ *abi.MapType) {
	tab := newTable(typ, 2*abi.MapGroupSlots, 0, 0)

	g := groupReference{
		data: m.dirPtr,
	}

	for i := uintptr(0); i < abi.MapGroupSlots; i++ {
		if (g.ctrls().get(i) & ctrlEmpty) == ctrlEmpty {
			// Empty
			continue
		}

		key := g.key(typ, i)
		if typ.IndirectKey() {
			key = *((*unsafe.Pointer)(key))
		}

		elem := g.elem(typ, i)
		if typ.IndirectElem() {
			elem = *((*unsafe.Pointer)(elem))
		}

		hash := typ.Hasher(key, m.seed)

		tab.uncheckedPutSlot(typ, hash, key, elem)
	}

	directory := make([]*table, 1)

	directory[0] = tab

	m.dirPtr = unsafe.Pointer(&directory[0])
	m.dirLen = len(directory)

	m.globalDepth = 0
	m.globalShift = depthToShift(m.globalDepth)
}

func (m *Map) Delete(typ *abi.MapType, key unsafe.Pointer) {
	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return
	}

	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirLen == 0 {
		m.deleteSmall(typ, hash, key)
	} else {
		idx := m.directoryIndex(hash)
		if m.directoryAt(idx).Delete(typ, m, hash, key) {
			m.tombstonePossible = true
		}
	}

	if m.used == 0 {
		// Reset the hash seed to make it more difficult for attackers
		// to repeatedly trigger hash collisions. See
		// https://go.dev/issue/25237.
		m.seed = uintptr(rand())
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1
}

func (m *Map) deleteSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	for match != 0 {
		i := match.first()
		slotKey := g.key(typ, i)
		origSlotKey := slotKey
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}
		if typ.Key.Equal(key, slotKey) {
			m.used--

			if typ.IndirectKey() {
				// Clearing the pointer is sufficient.
				*(*unsafe.Pointer)(origSlotKey) = nil
			} else if typ.Key.Pointers() {
				// Only bother clearing if there are pointers.
				typedmemclr(typ.Key, slotKey)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				// Clearing the pointer is sufficient.
				*(*unsafe.Pointer)(slotElem) = nil
			} else {
				// Unlike keys, always clear the elem (even if
				// it contains no pointers), as compound
				// assignment operations depend on cleared
				// deleted values. See
				// https://go.dev/issue/25936.
				typedmemclr(typ.Elem, slotElem)
			}

			// We only have 1 group, so it is OK to immediately
			// reuse deleted slots.
			g.ctrls().set(i, ctrlEmpty)
			return
		}
		match = match.removeFirst()
	}
}

// Clear deletes all entries from the map, resulting in an empty map.
func (m *Map) Clear(typ *abi.MapType) {
	if m == nil || m.Used() == 0 && !m.tombstonePossible {
		return
	}

	if m.writing != 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirLen == 0 {
		m.clearSmall(typ)
	} else {
		var lastTab *table
		for i := range m.dirLen {
			t := m.directoryAt(uintptr(i))
			if t == lastTab {
				continue
			}
			t.Clear(typ)
			lastTab = t
		}
		m.used = 0
		m.tombstonePossible = false
		// TODO: shrink directory?
	}
	m.clearSeq++

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See https://go.dev/issue/25237.
	m.seed = uintptr(rand())

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1
}

func (m *Map) clearSmall(typ *abi.MapType) {
	g := groupReference{
		data: m.dirPtr,
	}

	typedmemclr(typ.Group, g.data)
	g.ctrls().setEmpty()

	m.used = 0
}

func (m *Map) Clone(typ *abi.MapType) *Map {
	// Note: this should never be called with a nil map.
	if m.writing != 0 {
		fatal("concurrent map clone and map write")
	}

	// Shallow copy the Map structure.
	m2 := new(Map)
	*m2 = *m
	m = m2

	// We just need to deep copy the dirPtr field.
	if m.dirPtr == nil {
		// Delayed group allocation; nothing to do.
	} else if m.dirLen == 0 {
		// Clone one group.
		oldGroup := groupReference{data: m.dirPtr}
		newGroup := groupReference{data: newGroups(typ, 1).data}
		cloneGroup(typ, newGroup, oldGroup)
		m.dirPtr = newGroup.data
	} else {
		// Clone each (different) table.
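		// Note that consecutive directory entries may point to the
		// same table (see the directory example in the package
		// comment); the oldDir[i-1] comparison below keeps such
		// duplicates pointing at a single clone rather than cloning
		// the table twice.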
		oldDir := unsafe.Slice((**table)(m.dirPtr), m.dirLen)
		newDir := make([]*table, m.dirLen)
		for i, t := range oldDir {
			if i > 0 && t == oldDir[i-1] {
				newDir[i] = newDir[i-1]
				continue
			}
			newDir[i] = t.clone(typ)
		}
		m.dirPtr = unsafe.Pointer(&newDir[0])
	}

	return m
}

func mapKeyError(t *abi.MapType, p unsafe.Pointer) error {
	if !t.HashMightPanic() {
		return nil
	}
	return mapKeyError2(t.Key, p)
}

func mapKeyError2(t *abi.Type, p unsafe.Pointer) error {
	if t.TFlag&abi.TFlagRegularMemory != 0 {
		return nil
	}
	switch t.Kind() {
	case abi.Float32, abi.Float64, abi.Complex64, abi.Complex128, abi.String:
		return nil
	case abi.Interface:
		i := (*abi.InterfaceType)(unsafe.Pointer(t))
		var t *abi.Type
		var pdata *unsafe.Pointer
		if len(i.Methods) == 0 {
			a := (*abi.EmptyInterface)(p)
			t = a.Type
			if t == nil {
				return nil
			}
			pdata = &a.Data
		} else {
			a := (*abi.NonEmptyInterface)(p)
			if a.ITab == nil {
				return nil
			}
			t = a.ITab.Type
			pdata = &a.Data
		}

		if t.Equal == nil {
			return unhashableTypeError{t}
		}

		if t.IsDirectIface() {
			return mapKeyError2(t, unsafe.Pointer(pdata))
		} else {
			return mapKeyError2(t, *pdata)
		}
	case abi.Array:
		a := (*abi.ArrayType)(unsafe.Pointer(t))
		for i := uintptr(0); i < a.Len; i++ {
			if err := mapKeyError2(a.Elem, unsafe.Pointer(uintptr(p)+i*a.Elem.Size_)); err != nil {
				return err
			}
		}
		return nil
	case abi.Struct:
		s := (*abi.StructType)(unsafe.Pointer(t))
		for _, f := range s.Fields {
			if f.Name.IsBlank() {
				continue
			}
			if err := mapKeyError2(f.Typ, unsafe.Pointer(uintptr(p)+f.Offset)); err != nil {
				return err
			}
		}
		return nil
	default:
		// Should never happen; keep this case for robustness.
		return unhashableTypeError{t}
	}
}

type unhashableTypeError struct{ typ *abi.Type }

func (unhashableTypeError) RuntimeError() {}

func (e unhashableTypeError) Error() string { return "hash of unhashable type: " + typeString(e.typ) }

// Pushed from runtime.
//
//go:linkname typeString
func typeString(typ *abi.Type) string