Source file src/runtime/tracemap.go
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Simple append-only thread-safe hash map for tracing.
// Provides a mapping between variable-length data and a
// unique ID. Subsequent puts of the same data will return
// the same ID. The zero value is ready to use.
//
// Uses a region-based allocation scheme internally, and
// reset clears the whole map.
//
// It avoids doing any high-level Go operations so it's safe
// to use even in sensitive contexts.

package runtime

import (
	"internal/cpu"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// traceMap is a map of a variable-sized array of bytes to a unique ID.
//
// Because traceMap just operates on raw bytes, this type is used as the
// backing store for both the trace string table and trace stack table,
// the latter of which is just an array of PCs.
//
// ID 0 is reserved for arrays of bytes of size zero.
type traceMap struct {
	root atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
	_    cpu.CacheLinePad
	seq  atomic.Uint64
	_    cpu.CacheLinePad
	mem  traceRegionAlloc
}

// traceMapNode is an implementation of a lock-free append-only hash-trie
// (a trie of the hash bits).
//
// Key features:
//   - 4-ary trie. Child nodes are indexed by the upper 2 (remaining) bits of the hash.
//     For example, top level uses bits [63:62], next level uses [61:60] and so on.
//   - New nodes are placed at the first empty level encountered.
//   - When the first child is added to a node, the existing value is not moved into a child.
//     This means that you must check the key at each level, not just at the leaf.
//   - No deletion or rebalancing.
//   - Intentionally devolves into a linked list on hash collisions (the hash bits will all
//     get shifted out during iteration, and new nodes will just be appended to the 0th child).
type traceMapNode struct {
	_ sys.NotInHeap

	children [4]atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
	hash     uintptr
	id       uint64
	data     []byte
}

// stealID steals an ID from the table, ensuring that it will not
// appear in the table anymore.
func (tab *traceMap) stealID() uint64 {
	return tab.seq.Add(1)
}

// put inserts the data into the table.
//
// It's always safe for callers to noescape data because put copies its bytes.
//
// Returns a unique ID for the data and whether this is the first time
// the data has been added to the map.
func (tab *traceMap) put(data unsafe.Pointer, size uintptr) (uint64, bool) {
	if size == 0 {
		return 0, false
	}
	hash := memhash(data, 0, size)

	var newNode *traceMapNode
	m := &tab.root
	hashIter := hash
	for {
		n := (*traceMapNode)(m.Load())
		if n == nil {
			// Try to insert a new map node. We may end up discarding
			// this node if we fail to insert because it turns out the
			// value is already in the map.
			//
			// The discard will only happen if two threads race on inserting
			// the same value. Both might create nodes, but only one will
			// succeed on insertion. If two threads race to insert two
			// different values, then both nodes will *always* get inserted,
			// because the equality checking below will always fail.
			//
			// Performance note: contention on insertion is likely to be
			// higher for small maps, but since this data structure is
			// append-only, either the map stays small because there isn't
			// much activity, or the map gets big and races to insert on
			// the same node are much less likely.
			if newNode == nil {
				newNode = tab.newTraceMapNode(data, size, hash, tab.seq.Add(1))
			}
			if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
				return newNode.id, true
			}
			// Reload n. Because pointers are only stored once,
			// we must have lost the race, and therefore n is not nil
			// anymore.
			n = (*traceMapNode)(m.Load())
		}
		if n.hash == hash && uintptr(len(n.data)) == size {
			if memequal(unsafe.Pointer(&n.data[0]), data, size) {
				return n.id, false
			}
		}
		m = &n.children[hashIter>>(8*goarch.PtrSize-2)]
		hashIter <<= 2
	}
}

func (tab *traceMap) newTraceMapNode(data unsafe.Pointer, size, hash uintptr, id uint64) *traceMapNode {
	// Create data array.
	sl := notInHeapSlice{
		array: tab.mem.alloc(size),
		len:   int(size),
		cap:   int(size),
	}
	memmove(unsafe.Pointer(sl.array), data, size)

	// Create metadata structure.
	meta := (*traceMapNode)(unsafe.Pointer(tab.mem.alloc(unsafe.Sizeof(traceMapNode{}))))
	*(*notInHeapSlice)(unsafe.Pointer(&meta.data)) = sl
	meta.id = id
	meta.hash = hash
	return meta
}

// reset drops all allocated memory from the table and resets it.
//
// The caller must ensure that there are no put operations executing concurrently
// with this function.
func (tab *traceMap) reset() {
	tab.root.Store(nil)
	tab.seq.Store(0)
	tab.mem.drop()
}
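
Illustrative sketch (not part of the file above): the child-selection arithmetic in put, hashIter>>(8*goarch.PtrSize-2) followed by hashIter <<= 2, can be easier to follow in isolation. The standalone program below mimics that walk with a made-up hash value in place of a memhash result; it assumes math/bits.UintSize matches the pointer width, which holds on current Go platforms, since internal/goarch is not importable outside the standard library.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Stand-in for a memhash result, shifted so the interesting bits sit at the
	// top of the word on both 32-bit and 64-bit platforms.
	hashIter := uint(0xDEADBEEF) << (bits.UintSize - 32)
	for level := 0; level < 4; level++ {
		// Same selection as put: the upper 2 remaining bits pick one of 4 children.
		child := hashIter >> (bits.UintSize - 2)
		fmt.Printf("level %d: child index %d\n", level, child)
		hashIter <<= 2 // consume those bits before descending to the next level
	}
}

Because two bits are consumed per level, a 64-bit hash supports 32 levels before the bits run out, at which point lookups fall back to the linked-list behavior described in the traceMapNode comment.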