Source file src/unique/handle.go
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package unique

import (
	"internal/abi"
	"internal/concurrent"
	"internal/weak"
	"runtime"
	"sync"
	"unsafe"
)

var zero uintptr

// Handle is a globally unique identity for some value of type T.
//
// Two handles compare equal exactly if the two values used to create the handles
// would have also compared equal. The comparison of two handles is trivial and
// typically much more efficient than comparing the values used to create them.
type Handle[T comparable] struct {
	value *T
}

// Value returns a shallow copy of the T value that produced the Handle.
// Value is safe for concurrent use by multiple goroutines.
func (h Handle[T]) Value() T {
	return *h.value
}

// Make returns a globally unique handle for a value of type T. Handles
// are equal if and only if the values used to produce them are equal.
// Make is safe for concurrent use by multiple goroutines.
func Make[T comparable](value T) Handle[T] {
	// Find the map for type T.
	typ := abi.TypeFor[T]()
	if typ.Size() == 0 {
		return Handle[T]{(*T)(unsafe.Pointer(&zero))}
	}
	ma, ok := uniqueMaps.Load(typ)
	if !ok {
		// This is a good time to initialize cleanup, since we must go through
		// this path on the first use of Make, and it's not on the hot path.
		setupMake.Do(registerCleanup)
		ma = addUniqueMap[T](typ)
	}
	m := ma.(*uniqueMap[T])

	// Keep around any values we allocate for insertion. There
	// are a few different ways we can race with other threads
	// and create values that we might discard. By keeping
	// the first one we make around, we can avoid generating
	// more than one per racing thread.
	var (
		toInsert     *T // Keep this around to keep it alive.
		toInsertWeak weak.Pointer[T]
	)
	newValue := func() (T, weak.Pointer[T]) {
		if toInsert == nil {
			toInsert = new(T)
			*toInsert = clone(value, &m.cloneSeq)
			toInsertWeak = weak.Make(toInsert)
		}
		return *toInsert, toInsertWeak
	}
	var ptr *T
	for {
		// Check the map.
		wp, ok := m.Load(value)
		if !ok {
			// Try to insert a new value into the map.
			k, v := newValue()
			wp, _ = m.LoadOrStore(k, v)
		}
		// Now that we're sure there's a value in the map, let's
		// try to get the pointer we need out of it.
		ptr = wp.Strong()
		if ptr != nil {
			break
		}
		// The weak pointer is nil, so the old value is truly dead.
		// Try to remove it and start over.
		m.CompareAndDelete(value, wp)
	}
	runtime.KeepAlive(toInsert)
	return Handle[T]{ptr}
}
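// The sketch below is illustrative only: the helper name exampleHandleUsage is
// hypothetical and not part of the package API, and callers outside this package
// would write unique.Make and unique.Handle. It shows the intended calling pattern:
// because Make canonicalizes equal values to a single boxed copy, comparing two
// Handles reduces to a single pointer comparison, which is the cheap equality check
// the Handle doc comment refers to.
func exampleHandleUsage() (equal bool, s string) {
	// Both calls observe the same canonical *string inside the package's
	// type-specific map, so the handles compare equal.
	h1 := Make("gopher")
	h2 := Make("gopher")

	// Value returns a shallow copy of the canonical value.
	return h1 == h2, h1.Value()
}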
var (
	// uniqueMaps is an index of type-specific concurrent maps used for unique.Make.
	//
	// The two-level map might seem odd at first since the HashTrieMap could have "any"
	// as its key type, but the issue is escape analysis. We do not want to force lookups
	// to escape the argument, and using a type-specific map allows us to avoid that where
	// possible (for example, for strings and plain-ol'-data structs). We also get the
	// benefit of not cramming every different type into a single map, but that's certainly
	// not enough to outweigh the cost of two map lookups. What is worth it though, is saving
	// on those allocations.
	uniqueMaps = concurrent.NewHashTrieMap[*abi.Type, any]() // any is always a *uniqueMap[T].

	// cleanupFuncs are functions that clean up dead weak pointers in type-specific
	// maps in uniqueMaps. We express cleanup this way because there's no way to iterate
	// over uniqueMaps and call functions on the type-specific data structures otherwise.
	// These cleanup funcs each close over one of these type-specific maps.
	//
	// cleanupMu protects cleanupNotify and is held across the entire cleanup. Used for testing.
	// cleanupNotify is a test-only mechanism that allows tests to wait for the cleanup to run.
	cleanupMu      sync.Mutex
	cleanupFuncsMu sync.Mutex
	cleanupFuncs   []func()
	cleanupNotify  []func() // One-time notifications when cleanups finish.
)

type uniqueMap[T comparable] struct {
	*concurrent.HashTrieMap[T, weak.Pointer[T]]
	cloneSeq
}

func addUniqueMap[T comparable](typ *abi.Type) *uniqueMap[T] {
	// Create a map for T and try to register it. We could
	// race with someone else, but that's fine; it's one
	// small, stray allocation. The number of allocations
	// this can create is bounded by a small constant.
	m := &uniqueMap[T]{
		HashTrieMap: concurrent.NewHashTrieMap[T, weak.Pointer[T]](),
		cloneSeq:    makeCloneSeq(typ),
	}
	a, loaded := uniqueMaps.LoadOrStore(typ, m)
	if !loaded {
		// Add a cleanup function for the new map.
		cleanupFuncsMu.Lock()
		cleanupFuncs = append(cleanupFuncs, func() {
			// Delete all the entries whose weak references are nil and clean up
			// deleted entries.
			m.All()(func(key T, wp weak.Pointer[T]) bool {
				if wp.Strong() == nil {
					m.CompareAndDelete(key, wp)
				}
				return true
			})
		})
		cleanupFuncsMu.Unlock()
	}
	return a.(*uniqueMap[T])
}

// setupMake is used to perform initial setup for unique.Make.
var setupMake sync.Once

// registerCleanup registers a callback with the runtime that occasionally calls cleanupFuncs.
func registerCleanup() {
	runtime_registerUniqueMapCleanup(func() {
		// Lock for cleanup.
		cleanupMu.Lock()

		// Grab funcs to run.
		cleanupFuncsMu.Lock()
		cf := cleanupFuncs
		cleanupFuncsMu.Unlock()

		// Run cleanup.
		for _, f := range cf {
			f()
		}

		// Run cleanup notifications.
		for _, f := range cleanupNotify {
			f()
		}
		cleanupNotify = nil

		// Finished.
		cleanupMu.Unlock()
	})
}

// Implemented in runtime.

//go:linkname runtime_registerUniqueMapCleanup
func runtime_registerUniqueMapCleanup(cleanup func())
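// The sketch below is illustrative only (the helper name waitForCleanupSketch is
// hypothetical): it shows how the test-only cleanupNotify hook described above can
// be used to block until the next cleanup pass has finished. cleanupMu is held while
// registering; since the cleanup also holds it for its entire pass, the notification
// is installed either before a pass starts or after the previous one has fully
// finished. runtime.GC is called on the assumption that a collection prompts the
// runtime to schedule a cleanup pass; if it does not, the helper simply waits for
// the next pass to run.
func waitForCleanupSketch() {
	done := make(chan struct{}, 1)

	// Register a one-time notification for the next cleanup pass. The buffered
	// channel and select keep the notification itself non-blocking.
	cleanupMu.Lock()
	cleanupNotify = append(cleanupNotify, func() {
		select {
		case done <- struct{}{}:
		default:
		}
	})
	runtime.GC()
	cleanupMu.Unlock()

	// Block until the cleanup (and its notifications) have run.
	<-done
}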