// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: stack objects and stack tracing
// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing
// Also see issue 22350.

// Stack tracing solves the problem of determining which parts of the
// stack are live and should be scanned. It runs as part of scanning
// a single goroutine stack.
//
// Normally determining which parts of the stack are live is easy to
// do statically, as user code has explicit references (reads and
// writes) to stack variables. The compiler can do a simple dataflow
// analysis to determine liveness of stack variables at every point in
// the code. See cmd/compile/internal/gc/plive.go for that analysis.
//
// However, when we take the address of a stack variable, determining
// whether that variable is still live is less clear. We can still
// look for static accesses, but accesses through a pointer to the
// variable are difficult in general to track statically. That pointer
// can be passed among functions on the stack, conditionally retained,
// etc.
//
// Instead, we will track pointers to stack variables dynamically.
// All pointers to stack-allocated variables will themselves be on the
// stack somewhere (or in associated locations, like defer records), so
// we can find them all efficiently.
//
// Stack tracing is organized as a mini garbage collection tracing
// pass. The objects in this garbage collection are all the variables
// on the stack whose address is taken, and which themselves contain a
// pointer. We call these variables "stack objects".
//
// We begin by determining all the stack objects on the stack and all
// the statically live pointers that may point into the stack. We then
// process each pointer to see if it points to a stack object. If it
// does, we scan that stack object. It may contain pointers into the
// heap, in which case those pointers are passed to the main garbage
// collection. It may also contain pointers into the stack, in which
// case we add them to our set of stack pointers.
//
// Once we're done processing all the pointers (including the ones we
// added during processing), we've found all the stack objects that
// are live. Any dead stack objects are not scanned and their contents
// will not keep heap objects live. Unlike the main garbage
// collection, we can't sweep the dead stack objects; they live on in
// a moribund state until the stack frame that contains them is
// popped.
//
// A stack can look like this:
//
// +----------+
// | foo()    |
// | +------+ |
// | |  A   | | <---\
// | +------+ |     |
// |          |     |
// | +------+ |     |
// | |  B   | |     |
// | +------+ |     |
// |          |     |
// +----------+     |
// | bar()    |     |
// | +------+ |     |
// | |  C   | | <-\ |
// | +----|-+ |   | |
// |      |   |   | |
// | +----v-+ |   | |
// | |  D  ---------/
// | +------+ |   |
// |          |   |
// +----------+   |
// | baz()    |   |
// | +------+ |   |
// | |  E  -------/
// | +------+ |
// |      ^   |
// | F: --/   |
// |          |
// +----------+
//
// foo() calls bar() calls baz(). Each has a frame on the stack.
// foo() has stack objects A and B.
// bar() has stack objects C and D, with C pointing to D and D pointing to A.
// baz() has a stack object E pointing to C, and a local variable F pointing to E.
//
// Starting from the pointer in local variable F, we will eventually
// scan all of E, C, D, and A (in that order). B is never scanned
// because there is no live pointer to it.
If B is also statically 92 // dead (meaning that foo() never accesses B again after it calls 93 // bar()), then B's pointers into the heap are not considered live. 94 95 package runtime 96 97 import ( 98 "internal/goarch" 99 "runtime/internal/sys" 100 "unsafe" 101 ) 102 103 const stackTraceDebug = false 104 105 // Buffer for pointers found during stack tracing. 106 // Must be smaller than or equal to workbuf. 107 type stackWorkBuf struct { 108 _ sys.NotInHeap 109 stackWorkBufHdr 110 obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr 111 } 112 113 // Header declaration must come after the buf declaration above, because of issue #14620. 114 type stackWorkBufHdr struct { 115 _ sys.NotInHeap 116 workbufhdr 117 next *stackWorkBuf // linked list of workbufs 118 // Note: we could theoretically repurpose lfnode.next as this next pointer. 119 // It would save 1 word, but that probably isn't worth busting open 120 // the lfnode API. 121 } 122 123 // Buffer for stack objects found on a goroutine stack. 124 // Must be smaller than or equal to workbuf. 125 type stackObjectBuf struct { 126 _ sys.NotInHeap 127 stackObjectBufHdr 128 obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject 129 } 130 131 type stackObjectBufHdr struct { 132 _ sys.NotInHeap 133 workbufhdr 134 next *stackObjectBuf 135 } 136 137 func init() { 138 if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) { 139 panic("stackWorkBuf too big") 140 } 141 if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) { 142 panic("stackObjectBuf too big") 143 } 144 } 145 146 // A stackObject represents a variable on the stack that has had 147 // its address taken. 148 type stackObject struct { 149 _ sys.NotInHeap 150 off uint32 // offset above stack.lo 151 size uint32 // size of object 152 r *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned. 
153 left *stackObject // objects with lower addresses 154 right *stackObject // objects with higher addresses 155 } 156 157 // obj.r = r, but with no write barrier. 158 // 159 //go:nowritebarrier 160 func (obj *stackObject) setRecord(r *stackObjectRecord) { 161 // Types of stack objects are always in read-only memory, not the heap. 162 // So not using a write barrier is ok. 163 *(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r)) 164 } 165 166 // A stackScanState keeps track of the state used during the GC walk 167 // of a goroutine. 168 type stackScanState struct { 169 // stack limits 170 stack stack 171 172 // conservative indicates that the next frame must be scanned conservatively. 173 // This applies only to the innermost frame at an async safe-point. 174 conservative bool 175 176 // buf contains the set of possible pointers to stack objects. 177 // Organized as a LIFO linked list of buffers. 178 // All buffers except possibly the head buffer are full. 179 buf *stackWorkBuf 180 freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis 181 182 // cbuf contains conservative pointers to stack objects. If 183 // all pointers to a stack object are obtained via 184 // conservative scanning, then the stack object may be dead 185 // and may contain dead pointers, so it must be scanned 186 // defensively. 187 cbuf *stackWorkBuf 188 189 // list of stack objects 190 // Objects are in increasing address order. 191 head *stackObjectBuf 192 tail *stackObjectBuf 193 nobjs int 194 195 // root of binary tree for fast object lookup by address 196 // Initialized by buildIndex. 197 root *stackObject 198 } 199 200 // Add p as a potential pointer to a stack object. 201 // p must be a stack address. 
202 func (s *stackScanState) putPtr(p uintptr, conservative bool) { 203 if p < s.stack.lo || p >= s.stack.hi { 204 throw("address not a stack address") 205 } 206 head := &s.buf 207 if conservative { 208 head = &s.cbuf 209 } 210 buf := *head 211 if buf == nil { 212 // Initial setup. 213 buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) 214 buf.nobj = 0 215 buf.next = nil 216 *head = buf 217 } else if buf.nobj == len(buf.obj) { 218 if s.freeBuf != nil { 219 buf = s.freeBuf 220 s.freeBuf = nil 221 } else { 222 buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) 223 } 224 buf.nobj = 0 225 buf.next = *head 226 *head = buf 227 } 228 buf.obj[buf.nobj] = p 229 buf.nobj++ 230 } 231 232 // Remove and return a potential pointer to a stack object. 233 // Returns 0 if there are no more pointers available. 234 // 235 // This prefers non-conservative pointers so we scan stack objects 236 // precisely if there are any non-conservative pointers to them. 237 func (s *stackScanState) getPtr() (p uintptr, conservative bool) { 238 for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} { 239 buf := *head 240 if buf == nil { 241 // Never had any data. 242 continue 243 } 244 if buf.nobj == 0 { 245 if s.freeBuf != nil { 246 // Free old freeBuf. 247 putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) 248 } 249 // Move buf to the freeBuf. 250 s.freeBuf = buf 251 buf = buf.next 252 *head = buf 253 if buf == nil { 254 // No more data in this list. 255 continue 256 } 257 } 258 buf.nobj-- 259 return buf.obj[buf.nobj], head == &s.cbuf 260 } 261 // No more data in either list. 262 if s.freeBuf != nil { 263 putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) 264 s.freeBuf = nil 265 } 266 return 0, false 267 } 268 269 // addObject adds a stack object at addr of type typ to the set of stack objects. 
270 func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) { 271 x := s.tail 272 if x == nil { 273 // initial setup 274 x = (*stackObjectBuf)(unsafe.Pointer(getempty())) 275 x.next = nil 276 s.head = x 277 s.tail = x 278 } 279 if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size { 280 throw("objects added out of order or overlapping") 281 } 282 if x.nobj == len(x.obj) { 283 // full buffer - allocate a new buffer, add to end of linked list 284 y := (*stackObjectBuf)(unsafe.Pointer(getempty())) 285 y.next = nil 286 x.next = y 287 s.tail = y 288 x = y 289 } 290 obj := &x.obj[x.nobj] 291 x.nobj++ 292 obj.off = uint32(addr - s.stack.lo) 293 obj.size = uint32(r.size) 294 obj.setRecord(r) 295 // obj.left and obj.right will be initialized by buildIndex before use. 296 s.nobjs++ 297 } 298 299 // buildIndex initializes s.root to a binary search tree. 300 // It should be called after all addObject calls but before 301 // any call of findObject. 302 func (s *stackScanState) buildIndex() { 303 s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs) 304 } 305 306 // Build a binary search tree with the n objects in the list 307 // x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ... 308 // Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx]. 309 // (The first object that was not included in the binary search tree.) 310 // If n == 0, returns nil, x. 311 func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) { 312 if n == 0 { 313 return nil, x, idx 314 } 315 var left, right *stackObject 316 left, x, idx = binarySearchTree(x, idx, n/2) 317 root = &x.obj[idx] 318 idx++ 319 if idx == len(x.obj) { 320 x = x.next 321 idx = 0 322 } 323 right, x, idx = binarySearchTree(x, idx, n-n/2-1) 324 root.left = left 325 root.right = right 326 return root, x, idx 327 } 328 329 // findObject returns the stack object containing address a, if any. 
330 // Must have called buildIndex previously. 331 func (s *stackScanState) findObject(a uintptr) *stackObject { 332 off := uint32(a - s.stack.lo) 333 obj := s.root 334 for { 335 if obj == nil { 336 return nil 337 } 338 if off < obj.off { 339 obj = obj.left 340 continue 341 } 342 if off >= obj.off+obj.size { 343 obj = obj.right 344 continue 345 } 346 return obj 347 } 348 } 349