Source file src/internal/runtime/maps/map.go
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package maps implements Go's builtin map type.
package maps

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

// This package contains the implementation of Go's builtin map type.
//
// The map design is based on Abseil's "Swiss Table" map design
// (https://abseil.io/about/design/swisstables), with modifications to cover
// Go's additional requirements, discussed below.
//
// Terminology:
//   - Slot: A storage location of a single key/element pair.
//   - Group: A group of abi.SwissMapGroupSlots (8) slots, plus a control word.
//   - Control word: An 8-byte word which denotes whether each slot is empty,
//     deleted, or used. If a slot is used, its control byte also contains the
//     lower 7 bits of the hash (H2).
//   - H1: Upper 57 bits of a hash.
//   - H2: Lower 7 bits of a hash.
//   - Table: A complete "Swiss Table" hash table. A table consists of one or
//     more groups for storage plus metadata to handle operations and to
//     determine when to grow.
//   - Map: The top-level Map type consists of zero or more tables for storage.
//     The upper bits of the hash select which table a key belongs to.
//   - Directory: Array of the tables used by the map.
//
// At its core, the table design is similar to a traditional open-addressed
// hash table. Storage consists of an array of groups, which effectively means
// an array of key/elem slots with some control words interspersed. Lookup uses
// the hash to determine an initial group to check. If, due to collisions, this
// group contains no match, the probe sequence selects the next group to check
// (see below for more detail about the probe sequence).
//
// The key difference occurs within a group. In a standard open-addressed
// linear probed hash table, we would check each slot one at a time to find a
// match. A Swiss table utilizes the extra control word to check all 8 slots in
// parallel.
//
// Each byte in the control word corresponds to one of the slots in the group.
// In each byte, 1 bit is used to indicate whether the slot is in use, or if it
// is empty/deleted. The other 7 bits contain the lower 7 bits of the hash for
// the key in that slot. See [ctrl] for the exact encoding.
//
// During lookup, we can use some clever bitwise manipulation to compare all 8
// 7-bit hashes against the input hash in parallel (see [ctrlGroup.matchH2]).
// That is, we effectively perform 8 steps of probing in a single operation.
// With SIMD instructions, this could be extended to 16 slots with a 16-byte
// control word.
//
// Since we only use 7 bits of the 64-bit hash, there is a 1 in 128 (~0.7%)
// probability of a false positive on each slot, but that's fine: we always
// need to double-check each match with a standard key comparison regardless.
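//
// As a rough illustration of the parallel match, the standard SWAR ("SIMD
// within a register") technique looks roughly like the following. This is a
// simplified, self-contained sketch of the idea, not the exact encoding used
// by [ctrlGroup.matchH2]:
//
//	// matchSketch reports, via a high bit per byte, which control bytes
//	// equal h2. XOR with a broadcast of h2 turns matching bytes into
//	// zero, then the classic zero-byte trick finds them: only a zero
//	// byte both borrows into its high bit (v - lsb) and had its high
//	// bit clear beforehand (&^ v).
//	func matchSketch(ctrls uint64, h2 uint8) uint64 {
//		const lsb = 0x0101010101010101
//		const msb = 0x8080808080808080
//		v := ctrls ^ (lsb * uint64(h2))
//		return ((v - lsb) &^ v) & msb
//	}
//
// Each set high bit in the result marks a candidate slot. The borrow can
// rarely flag a non-matching byte adjacent to a real match, but such false
// positives are harmless: every candidate is confirmed with a full key
// comparison anyway.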
//
// Probing
//
// Probing is done using the upper 57 bits (H1) of the hash as an index into
// the groups array. Probing walks through the groups using quadratic probing
// until it finds a group with a match or a group with an empty slot. See
// [probeSeq] for specifics about the probe sequence. Note the probe
// invariants: the number of groups must be a power of two, and the end of a
// probe sequence must be a group with an empty slot (the table can never be
// 100% full).
//
// Deletion
//
// Probing stops when it finds a group with an empty slot. This affects
// deletion: when deleting from a completely full group, we must not mark the
// slot as empty, as there could be more slots used later in a probe sequence
// and this deletion would cause probing to stop too early. Instead, we mark
// such slots as "deleted" with a tombstone. If the group still has an empty
// slot, we don't need a tombstone and directly mark the slot empty. Insert
// prioritizes reuse of tombstones over filling empty slots. Otherwise,
// tombstones are only completely cleared during grow, as an in-place cleanup
// complicates iteration.
//
// Growth
//
// The probe sequence depends on the number of groups. Thus, when growing the
// group count all slots must be reordered to match the new probe sequence. In
// other words, an entire table must be grown at once.
//
// In order to support incremental growth, the map splits its contents across
// multiple tables. Each table is still a full hash table, but an individual
// table may only service a subset of the hash space. Growth occurs on
// individual tables, so while an entire table must grow at once, each of
// these grows covers only a small portion of the map. The maximum size of a
// single grow is bounded by capping the maximum size of a table before it is
// split into multiple tables.
//
// A map starts with a single table. Up to [maxTableCapacity], growth simply
// replaces this table with a replacement of double capacity. Beyond this
// limit, growth splits the table into two.
//
// The map uses "extendible hashing" to select which table to use. In
// extendible hashing, we use the upper bits of the hash as an index into an
// array of tables (called the "directory"). The number of bits used increases
// as the number of tables increases. For example, when there is only 1 table,
// we use 0 bits (no selection necessary). When there are 2 tables, we use 1
// bit to select either the 0th or 1st table. [Map.globalDepth] is the number
// of bits currently used for table selection, and by extension (1 <<
// globalDepth), the size of the directory.
//
// Note that each table has its own load factor and grows independently. If
// the 1st table grows enough to split, we'll need 2 bits to select tables,
// though we'll have 3 tables total rather than 4. We support this by allowing
// multiple indices to point to the same table. For example:
//
//	directory (globalDepth=2)
//	+----+
//	| 00 | --\
//	+----+    +--> table (localDepth=1)
//	| 01 | --/
//	+----+
//	| 10 | ------> table (localDepth=2)
//	+----+
//	| 11 | ------> table (localDepth=2)
//	+----+
//
// Tables track the depth they were created at (localDepth). It is necessary
// to grow the directory when splitting a table where globalDepth ==
// localDepth.
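//
// To make the directory selection above concrete: with globalDepth=2 on a
// 64-bit system, a key's table is selected by the top 2 bits of its hash,
// i.e. index = hash >> 62. A minimal, self-contained sketch of the selection
// rule (the real version is [Map.directoryIndex]):
//
//	// directoryIndexSketch selects a directory slot from the top
//	// globalDepth bits of the hash, assuming a 64-bit hash.
//	func directoryIndexSketch(hash uint64, globalDepth uint8) uint64 {
//		if globalDepth == 0 {
//			return 0 // single table, no selection necessary
//		}
//		return hash >> (64 - globalDepth)
//	}
//
// In the diagram above, hashes whose top bits are 00 or 01 both land on the
// localDepth=1 table, while 10 and 11 select the two localDepth=2 tables.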
//
// Iteration
//
// Iteration is the most complex part of the map due to Go's generous
// iteration semantics. A summary of semantics from the spec:
//  1. Adding and/or deleting entries during iteration MUST NOT cause
//     iteration to return the same entry more than once.
//  2. Entries added during iteration MAY be returned by iteration.
//  3. Entries modified during iteration MUST return their latest value.
//  4. Entries deleted during iteration MUST NOT be returned by iteration.
//  5. Iteration order is unspecified. In the implementation, it is
//     explicitly randomized.
//
// If the map never grows, these semantics are straightforward: just iterate
// over every table in the directory and every group and slot in each table.
// All of these semantics then fall out naturally.
//
// If the map grows during iteration, things get significantly more
// complicated. First and foremost, we need to track which entries we already
// returned to satisfy (1). There are three types of grow:
//  a. A table replaced by a single larger table.
//  b. A table split into two replacement tables.
//  c. Growing the directory (occurs as part of (b) if necessary).
//
// For all of these cases, the replacement table(s) will have a different
// probe sequence, so simply tracking the current group and slot indices is
// not sufficient.
//
// For (a) and (b), note that grows of tables other than the one we are
// currently iterating over are irrelevant.
//
// We handle (a) and (b) by having the iterator keep a reference to the table
// it is currently iterating over, even after the table is replaced. We keep
// iterating over the original table to maintain the iteration order and
// avoid violating (1). Any new entries added only to the replacement
// table(s) will be skipped (allowed by (2)). To avoid violating (3) or (4),
// while we use the original table to select the keys, we must look them up
// again in the new table(s) to determine if they have been modified or
// deleted. There is yet another layer of complexity if the key does not
// compare equal to itself. See [Iter.Next] for the gory details.
//
// Note that for (b) once we finish iterating over the old table we'll need
// to skip the next entry in the directory, as that contains the second split
// of the old table. We can use the old table's localDepth to determine the
// next logical index to use.
//
// For (c), we must adjust the current directory index when the directory
// grows. This is more straightforward, as the directory order remains the
// same after grow, so we just double the index if the directory size
// doubles.

// Extracts the H1 portion of a hash: the 57 upper bits.
// TODO(prattmic): what about 32-bit systems?
func h1(h uintptr) uintptr {
	return h >> 7
}

// Extracts the H2 portion of a hash: the 7 bits not used for h1.
//
// These are used as an occupied control byte.
func h2(h uintptr) uintptr {
	return h & 0x7f
}
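
// As a worked example of the H1/H2 split above (assuming a 64-bit uintptr;
// the hash value is arbitrary, chosen only for illustration):
//
//	hash     = 0xABCDEF0123456789
//	h1(hash) = hash >> 7   = 0x01579BDE02468ACF (upper 57 bits)
//	h2(hash) = hash & 0x7f = 0x09 (stored in the control byte)
//
// Since h2 occupies only the low 7 bits, an occupied control byte always has
// its high bit clear, which is what distinguishes it from the empty and
// deleted sentinel encodings (see [ctrl]).
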
type Map struct {
	// The number of filled slots (i.e. the number of elements in all
	// tables). Excludes deleted slots.
	// Must be first (known by the compiler, for len() builtin).
	used uint64

	// seed is the hash seed, computed as a unique random number per map.
	seed uintptr

	// The directory of tables.
	//
	// Normally dirPtr points to an array of table pointers:
	//
	//	dirPtr *[dirLen]*table
	//
	// The length (dirLen) of this array is `1 << globalDepth`. Multiple
	// entries may point to the same table. See top-level comment for more
	// details.
	//
	// Small map optimization: if the map always contained
	// abi.SwissMapGroupSlots or fewer entries, it fits entirely in a
	// single group. In that case dirPtr points directly to a single group:
	//
	//	dirPtr *group
	//
	// In this case, dirLen is 0. used counts the number of used slots in
	// the group. Note that small maps never have deleted slots (as there
	// is no probe sequence to maintain).
	dirPtr unsafe.Pointer
	dirLen int

	// The number of bits to use in table directory lookups.
	globalDepth uint8

	// The number of bits to shift out of the hash for directory lookups.
	// On 64-bit systems, this is 64 - globalDepth.
	globalShift uint8

	// writing is a flag that is toggled (XOR 1) while the map is being
	// written. Normally it is set to 1 when writing, but if there are
	// multiple concurrent writers, then toggling increases the probability
	// that both sides will detect the race.
	writing uint8

	// clearSeq is a sequence counter of calls to Clear. It is used to
	// detect map clears during iteration.
	clearSeq uint64
}

func depthToShift(depth uint8) uint8 {
	if goarch.PtrSize == 4 {
		return 32 - depth
	}
	return 64 - depth
}
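
// A worked example of the sizing arithmetic in NewMap below, assuming
// maxAvgGroupLoad = 7 and maxTableCapacity = 1024 (see the package constants
// for the authoritative values): a hint of 2000 entries gives
//
//	targetCapacity = 2000 * 8 / 7      = 2285 slots
//	dirSize        = ceil(2285 / 1024) = 3 -> rounded up to 4 (power of two)
//	globalDepth    = log2(4)           = 2
//	per-table hint = 2285 / 4          = 571 slots
//
// so the map starts with a directory of 4 tables, each sized to hold its
// share of the hint without growing in the average case.
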
// If m is non-nil, it should be used rather than allocating.
//
// maxAlloc should be runtime.maxAlloc.
//
// TODO(prattmic): Put maxAlloc somewhere accessible.
func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
	if m == nil {
		m = new(Map)
	}

	m.seed = uintptr(rand())

	if hint <= abi.SwissMapGroupSlots {
		// A small map can fill all 8 slots, so no need to increase
		// target capacity.
		//
		// In fact, since an 8 slot group is what the first assignment
		// to an empty map would allocate anyway, it doesn't matter if
		// we allocate here or on the first assignment.
		//
		// Thus we just return without allocating. (We'll save the
		// allocation completely if no assignment comes.)

		// Note that the compiler may have initialized m.dirPtr with a
		// pointer to a stack-allocated group, in which case we already
		// have a group. The control word is already initialized.

		return m
	}

	// Full size map.

	// Set initial capacity to hold hint entries without growing in the
	// average case.
	targetCapacity := (hint * abi.SwissMapGroupSlots) / maxAvgGroupLoad
	if targetCapacity < hint { // overflow
		return m // return an empty map.
	}

	dirSize := (uint64(targetCapacity) + maxTableCapacity - 1) / maxTableCapacity
	dirSize, overflow := alignUpPow2(dirSize)
	if overflow || dirSize > uint64(math.MaxUintptr) {
		return m // return an empty map.
	}

	// Reject hints that are obviously too large.
	groups, overflow := math.MulUintptr(uintptr(dirSize), maxTableCapacity)
	if overflow {
		return m // return an empty map.
	} else {
		mem, overflow := math.MulUintptr(groups, mt.GroupSize)
		if overflow || mem > maxAlloc {
			return m // return an empty map.
		}
	}

	m.globalDepth = uint8(sys.TrailingZeros64(dirSize))
	m.globalShift = depthToShift(m.globalDepth)

	directory := make([]*table, dirSize)

	for i := range directory {
		// TODO: Think more about initial table capacity.
		directory[i] = newTable(mt, uint64(targetCapacity)/dirSize, i, m.globalDepth)
	}

	m.dirPtr = unsafe.Pointer(&directory[0])
	m.dirLen = len(directory)

	return m
}

func NewEmptyMap() *Map {
	m := new(Map)
	m.seed = uintptr(rand())
	// See comment in NewMap. No need to eager allocate a group.
	return m
}

func (m *Map) directoryIndex(hash uintptr) uintptr {
	if m.dirLen == 1 {
		// Single table: no selection necessary. This also avoids a
		// bogus shift: with globalDepth == 0, globalShift is the full
		// word width, which (& 63) would wrap to a shift of 0.
		return 0
	}
	return hash >> (m.globalShift & 63)
}

func (m *Map) directoryAt(i uintptr) *table {
	return *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i))
}

func (m *Map) directorySet(i uintptr, nt *table) {
	*(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i)) = nt
}

func (m *Map) replaceTable(nt *table) {
	// The number of entries that reference the same table doubles for each
	// time the globalDepth grows without the table splitting.
	entries := 1 << (m.globalDepth - nt.localDepth)
	for i := 0; i < entries; i++ {
		//m.directory[nt.index+i] = nt
		m.directorySet(uintptr(nt.index+i), nt)
	}
}

func (m *Map) installTableSplit(old, left, right *table) {
	if old.localDepth == m.globalDepth {
		// No room for another level in the directory. Grow the
		// directory.
		newDir := make([]*table, m.dirLen*2)
		for i := range m.dirLen {
			t := m.directoryAt(uintptr(i))
			newDir[2*i] = t
			newDir[2*i+1] = t
			// t may already exist in multiple indices. We should
			// only update t.index once. Since the index must
			// increase, seeing the original index means this must
			// be the first time we've encountered this table.
			if t.index == i {
				t.index = 2 * i
			}
		}
		m.globalDepth++
		m.globalShift--
		//m.directory = newDir
		m.dirPtr = unsafe.Pointer(&newDir[0])
		m.dirLen = len(newDir)
	}

	// N.B. left and right may still consume multiple indices if the
	// directory has grown multiple times since old was last split.
	left.index = old.index
	m.replaceTable(left)

	entries := 1 << (m.globalDepth - left.localDepth)
	right.index = left.index + entries
	m.replaceTable(right)
}

func (m *Map) Used() uint64 {
	return m.used
}
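
// A worked illustration of installTableSplit above, assuming (per the design
// comment) that the two replacement tables are created one level deeper than
// the old table. Suppose globalDepth = 1 with directory [A, B] (A.index = 0,
// B.index = 1) and A (localDepth = 1) splits into left and right (localDepth
// = 2). Since old.localDepth == globalDepth, the directory first doubles to
// [A, A, B, B] with globalDepth = 2 and B.index updated to 2. Then:
//
//	left.index  = old.index = 0              // directory[0] = left
//	entries     = 1 << (2 - left.localDepth) // 1 << (2-2) = 1
//	right.index = left.index + entries = 1   // directory[1] = right
//
// yielding [left, right, B, B]: B (localDepth = 1) still serves two
// directory entries, matching the extendible hashing diagram in the
// top-level comment.
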
// Get performs a lookup of the key that key points to. It returns a pointer
// to the element if found, and a bool indicating whether the key is present.
func (m *Map) Get(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
	return m.getWithoutKey(typ, key)
}

func (m *Map) getWithKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	if m.Used() == 0 {
		return nil, nil, false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	if m.dirLen == 0 {
		return m.getWithKeySmall(typ, hash, key)
	}

	idx := m.directoryIndex(hash)
	return m.directoryAt(idx).getWithKey(typ, hash, key)
}

func (m *Map) getWithoutKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if m.Used() == 0 {
		return nil, false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	if m.dirLen == 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		return elem, ok
	}

	idx := m.directoryIndex(hash)
	return m.directoryAt(idx).getWithoutKey(typ, hash, key)
}

func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	g := groupReference{
		data: m.dirPtr,
	}

	h2 := uint8(h2(hash))
	ctrls := *g.ctrls()

	for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
		c := uint8(ctrls)
		ctrls >>= 8
		if c != h2 {
			// Note that this also skips empty/deleted slots, whose
			// control bytes have the high bit set while h2 does not.
			continue
		}

		slotKey := g.key(typ, i)
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}

		if typ.Key.Equal(key, slotKey) {
			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				slotElem = *((*unsafe.Pointer)(slotElem))
			}
			return slotKey, slotElem, true
		}
	}

	return nil, nil, false
}

func (m *Map) Put(typ *abi.SwissMapType, key, elem unsafe.Pointer) {
	slotElem := m.PutSlot(typ, key)
	typedmemmove(typ.Elem, slotElem, elem)
}

// PutSlot returns a pointer to the element slot where an inserted element
// should be written.
//
// PutSlot never returns nil.
func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer {
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmall(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		//
		// TODO(prattmic): If this is an update to an existing key then
		// we actually don't need to grow.
		m.growToTable(typ)
	}

	for {
		idx := m.directoryIndex(hash)
		elem, ok := m.directoryAt(idx).PutSlot(typ, m, hash, key)
		if !ok {
			// The table grew or split during the insert attempt;
			// recompute the directory index and retry.
			continue
		}

		if m.writing == 0 {
			fatal("concurrent map writes")
		}
		m.writing ^= 1

		return elem
	}
}
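
// A sketch of how the write paths above compose for a map that starts empty
// (illustrative only; real callers are the compiler-generated map
// operations):
//
//	m := NewEmptyMap()
//	// 1st insert: m.dirPtr == nil, so growToSmall allocates one group.
//	// Inserts 2-8: putSlotSmall fills the remaining slots of the group.
//	// 9th insert (of a new key): m.used == abi.SwissMapGroupSlots, so
//	// growToTable rehashes the group into a real table, after which the
//	// directory-based PutSlot loop takes over.
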
func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}
		if typ.Key.Equal(key, slotKey) {
			if typ.NeedKeyUpdate() {
				typedmemmove(typ.Key, slotKey, key)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				slotElem = *((*unsafe.Pointer)(slotElem))
			}

			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, as small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
		return nil
	}

	i := match.first()

	slotKey := g.key(typ, i)
	if typ.IndirectKey() {
		kmem := newobject(typ.Key)
		*(*unsafe.Pointer)(slotKey) = kmem
		slotKey = kmem
	}
	typedmemmove(typ.Key, slotKey, key)

	slotElem := g.elem(typ, i)
	if typ.IndirectElem() {
		emem := newobject(typ.Elem)
		*(*unsafe.Pointer)(slotElem) = emem
		slotElem = emem
	}

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

func (m *Map) growToSmall(typ *abi.SwissMapType) {
	grp := newGroups(typ, 1)
	m.dirPtr = grp.data

	g := groupReference{
		data: m.dirPtr,
	}
	g.ctrls().setEmpty()
}

func (m *Map) growToTable(typ *abi.SwissMapType) {
	tab := newTable(typ, 2*abi.SwissMapGroupSlots, 0, 0)

	g := groupReference{
		data: m.dirPtr,
	}

	for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
		if (g.ctrls().get(i) & ctrlEmpty) == ctrlEmpty {
			// Empty
			continue
		}

		key := g.key(typ, i)
		if typ.IndirectKey() {
			key = *((*unsafe.Pointer)(key))
		}

		elem := g.elem(typ, i)
		if typ.IndirectElem() {
			elem = *((*unsafe.Pointer)(elem))
		}

		hash := typ.Hasher(key, m.seed)

		tab.uncheckedPutSlot(typ, hash, key, elem)
	}

	directory := make([]*table, 1)

	directory[0] = tab

	m.dirPtr = unsafe.Pointer(&directory[0])
	m.dirLen = len(directory)

	m.globalDepth = 0
	m.globalShift = depthToShift(m.globalDepth)
}
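
// To make the tombstone rule from the top-level "Deletion" section concrete,
// here is a sketch of the choice a full-size table makes on delete (a
// paraphrase; the authoritative logic lives in [table.Delete]). deleteSmall
// below never needs tombstones, because a single group has no probe sequence
// to preserve:
//
//	if g.ctrls().matchEmpty() != 0 {
//		// The group has an empty slot, so no probe sequence can
//		// pass through this group; the slot may become empty again.
//		g.ctrls().set(i, ctrlEmpty)
//	} else {
//		// The group is full; a probe sequence may pass through it,
//		// so the slot must become a tombstone instead.
//		g.ctrls().set(i, ctrlDeleted)
//	}
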
func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) {
	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return
	}

	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirLen == 0 {
		m.deleteSmall(typ, hash, key)
	} else {
		idx := m.directoryIndex(hash)
		m.directoryAt(idx).Delete(typ, m, hash, key)
	}

	if m.used == 0 {
		// Reset the hash seed to make it more difficult for attackers
		// to repeatedly trigger hash collisions. See
		// https://go.dev/issue/25237.
		m.seed = uintptr(rand())
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1
}

func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	for match != 0 {
		i := match.first()
		slotKey := g.key(typ, i)
		origSlotKey := slotKey
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}
		if typ.Key.Equal(key, slotKey) {
			m.used--

			if typ.IndirectKey() {
				// Clearing the pointer is sufficient.
				*(*unsafe.Pointer)(origSlotKey) = nil
			} else if typ.Key.Pointers() {
				// Only bother clearing if there are pointers.
				typedmemclr(typ.Key, slotKey)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				// Clearing the pointer is sufficient.
				*(*unsafe.Pointer)(slotElem) = nil
			} else {
				// Unlike keys, always clear the elem (even if
				// it contains no pointers), as compound
				// assignment operations depend on cleared
				// deleted values. See
				// https://go.dev/issue/25936.
				typedmemclr(typ.Elem, slotElem)
			}

			// We only have 1 group, so it is OK to immediately
			// reuse deleted slots.
			g.ctrls().set(i, ctrlEmpty)
			return
		}
		match = match.removeFirst()
	}
}

// Clear deletes all entries from the map, resulting in an empty map.
func (m *Map) Clear(typ *abi.SwissMapType) {
	if m == nil || m.Used() == 0 {
		return
	}

	if m.writing != 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirLen == 0 {
		m.clearSmall(typ)
	} else {
		var lastTab *table
		for i := range m.dirLen {
			t := m.directoryAt(uintptr(i))
			if t == lastTab {
				// Directory entries for the same table are
				// adjacent; only clear each table once.
				continue
			}
			t.Clear(typ)
			lastTab = t
		}
		m.used = 0
		m.clearSeq++
		// TODO: shrink directory?
	}

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See https://go.dev/issue/25237.
	m.seed = uintptr(rand())

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1
}

func (m *Map) clearSmall(typ *abi.SwissMapType) {
	g := groupReference{
		data: m.dirPtr,
	}

	typedmemclr(typ.Group, g.data)
	g.ctrls().setEmpty()

	m.used = 0
	m.clearSeq++
}