Source file src/runtime/mcleanup.go
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

// AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer
// reachable, the runtime will call cleanup(arg) in a separate goroutine.
//
// A typical use is that ptr is an object wrapping an underlying resource (e.g.,
// a File object wrapping an OS file descriptor), arg is the underlying resource
// (e.g., the OS file descriptor), and the cleanup function releases the underlying
// resource (e.g., by calling the close system call).
//
// There are few constraints on ptr. In particular, multiple cleanups may be
// attached to the same pointer, or to different pointers within the same
// allocation.
//
// If ptr is reachable from cleanup or arg, ptr will never be collected
// and the cleanup will never run. As a protection against simple cases of this,
// AddCleanup panics if arg is equal to ptr.
//
// There is no specified order in which cleanups will run.
// In particular, if several objects point to each other and all become
// unreachable at the same time, their cleanups all become eligible to run
// and can run in any order. This is true even if the objects form a cycle.
//
// Cleanups run concurrently with any user-created goroutines.
// Cleanups may also run concurrently with one another (unlike finalizers).
// If a cleanup function must run for a long time, it should create a new goroutine
// to avoid blocking the execution of other cleanups.
//
// If ptr has both a cleanup and a finalizer, the cleanup will only run once
// it has been finalized and becomes unreachable without an associated finalizer.
//
// The cleanup(arg) call is not always guaranteed to run; in particular it is not
// guaranteed to run before program exit.
//
// Cleanups are not guaranteed to run if the size of T is zero bytes, because
// it may share the same address with other zero-size objects in memory. See
// https://go.dev/ref/spec#Size_and_alignment_guarantees.
//
// It is not guaranteed that a cleanup will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// Note that because cleanups may execute arbitrarily far into the future
// after an object is no longer referenced, the runtime is allowed to perform
// a space-saving optimization that batches objects together in a single
// allocation slot. The cleanup for an unreferenced object in such an
// allocation may never run if it always exists in the same batch as a
// referenced object. Typically, this batching only happens for tiny
// (on the order of 16 bytes or less) and pointer-free objects.
//
// A cleanup may run as soon as an object becomes unreachable.
// In order to use cleanups correctly, the program must ensure that
// the object is reachable until it is safe to run its cleanup.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. A function argument or
// receiver may become unreachable at the last point where the function
// mentions it. To ensure a cleanup does not get called prematurely,
// pass the object to the [KeepAlive] function after the last point
// where the object must remain reachable.
func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
	// Explicitly force ptr to escape to the heap.
	ptr = abi.Escape(ptr)

	// The pointer to the object must be valid.
	if ptr == nil {
		panic("runtime.AddCleanup: ptr is nil")
	}
	usptr := uintptr(unsafe.Pointer(ptr))

	// Check that arg is not equal to ptr.
	if kind := abi.TypeOf(arg).Kind(); kind == abi.Pointer || kind == abi.UnsafePointer {
		if unsafe.Pointer(ptr) == *((*unsafe.Pointer)(unsafe.Pointer(&arg))) {
			panic("runtime.AddCleanup: ptr is equal to arg, cleanup will never run")
		}
	}
	if inUserArenaChunk(usptr) {
		// Arena-allocated objects are not eligible for cleanup.
		panic("runtime.AddCleanup: ptr is arena-allocated")
	}
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no cleanup will ever run
		// (and we don't have the data structures to record them).
		// Return a noop cleanup.
		return Cleanup{}
	}

	fn := func() {
		cleanup(arg)
	}
	// Closure must escape.
	fv := *(**funcval)(unsafe.Pointer(&fn))
	fv = abi.Escape(fv)

	// Find the containing object.
	base, _, _ := findObject(usptr, 0, 0)
	if base == 0 {
		if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
			// Cleanup is a noop.
			return Cleanup{}
		}
		panic("runtime.AddCleanup: ptr not in allocated block")
	}

	// Create another G if necessary.
	if gcCleanups.needG() {
		gcCleanups.createGs()
	}

	id := addCleanup(unsafe.Pointer(ptr), fv)
	if debug.checkfinalizers != 0 {
		cleanupFn := *(**funcval)(unsafe.Pointer(&cleanup))
		setCleanupContext(unsafe.Pointer(ptr), abi.TypeFor[T](), sys.GetCallerPC(), cleanupFn.fn, id)
	}
	return Cleanup{
		id:  id,
		ptr: usptr,
	}
}
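
// An illustrative sketch (not part of the runtime) of the typical use
// described above: a wrapper releasing an OS resource once the wrapper
// becomes unreachable. The fileutil package, File type, and Open function
// here are hypothetical.
//
//	package fileutil
//
//	import (
//		"runtime"
//		"syscall"
//	)
//
//	type File struct {
//		fd int
//	}
//
//	func Open(name string) (*File, error) {
//		fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
//		if err != nil {
//			return nil, err
//		}
//		f := &File{fd: fd}
//		// Pass the descriptor, not f itself, as arg: if the cleanup
//		// (or arg) referenced f, f would never become unreachable.
//		runtime.AddCleanup(f, func(fd int) { syscall.Close(fd) }, fd)
//		return f, nil
//	}
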

// Cleanup is a handle to a cleanup call for a specific object.
type Cleanup struct {
	// id is the unique identifier for the cleanup within the arena.
	id uint64
	// ptr contains the pointer to the object.
	ptr uintptr
}

// Stop cancels the cleanup call. Stop will have no effect if the cleanup call
// has already been queued for execution (because ptr became unreachable).
// To guarantee that Stop removes the cleanup function, the caller must ensure
// that the pointer that was passed to AddCleanup is reachable across the call to Stop.
func (c Cleanup) Stop() {
	if c.id == 0 {
		// id is set to zero when the cleanup is a noop.
		return
	}

	// The following block removes the Special record of type cleanup for the object c.ptr.
	span := spanOfHeap(c.ptr)
	if span == nil {
		return
	}
	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := c.ptr - span.base()

	var found *special
	lock(&span.speciallock)

	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
	if exists {
		for {
			s := *iter
			if s == nil {
				// Reached the end of the linked list. Stop searching at this point.
				break
			}
			if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
				(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
				// The special is a cleanup and contains a matching cleanup id.
				*iter = s.next
				found = s
				break
			}
			if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
				// The special is outside the region specified for that kind of
				// special. The specials are sorted by kind.
				break
			}
			// Try the next special.
			iter = &s.next
		}
	}
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)

	if found == nil {
		return
	}
	lock(&mheap_.speciallock)
	mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
	unlock(&mheap_.speciallock)

	if debug.checkfinalizers != 0 {
		clearCleanupContext(c.ptr, c.id)
	}
}
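
// An illustrative sketch (not part of the runtime) of pairing Stop with an
// explicit Close method, continuing the hypothetical File wrapper above: the
// cleanup is a backstop for callers that forget to Close, and Close cancels
// it so the descriptor cannot be closed twice. Storing the Cleanup handle in
// f is fine: Cleanup holds no pointer to the object, so it does not keep f
// reachable.
//
//	type File struct {
//		fd      int
//		cleanup runtime.Cleanup
//	}
//
//	func Open(name string) (*File, error) {
//		fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
//		if err != nil {
//			return nil, err
//		}
//		f := &File{fd: fd}
//		f.cleanup = runtime.AddCleanup(f, func(fd int) { syscall.Close(fd) }, fd)
//		return f, nil
//	}
//
//	func (f *File) Close() error {
//		// f is the receiver and is used below, so it remains reachable
//		// across this call, which guarantees Stop cancels the cleanup.
//		f.cleanup.Stop()
//		return syscall.Close(f.fd)
//	}
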

const cleanupBlockSize = 512

// cleanupBlock is a block of cleanups to be executed.
//
// cleanupBlock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled. The GC and cleanup queue currently assume
// that the cleanup queue does not grow during marking (but it can shrink).
type cleanupBlock struct {
	cleanupBlockHeader
	cleanups [(cleanupBlockSize - unsafe.Sizeof(cleanupBlockHeader{})) / goarch.PtrSize]*funcval
}

var cleanupBlockPtrMask [cleanupBlockSize / goarch.PtrSize / 8]byte

type cleanupBlockHeader struct {
	_ sys.NotInHeap
	lfnode
	alllink *cleanupBlock

	// n is sometimes accessed atomically.
	//
	// The invariant depends on what phase the garbage collector is in.
	// During the sweep phase (gcphase == _GCoff), each block has exactly
	// one owner, so it's always safe to update this without atomics.
	// But if this *could* be updated during the mark phase, it must be
	// updated atomically to synchronize with the garbage collector
	// scanning the block as a root.
	n uint32
}

// enqueue pushes a single cleanup function into the block.
//
// Returns whether this enqueue call filled the block. This is odd,
// but we want to flush full blocks eagerly to get cleanups
// running as soon as possible.
//
// Must only be called if the GC is in the sweep phase (gcphase == _GCoff),
// because it does not synchronize with the garbage collector.
func (b *cleanupBlock) enqueue(fn *funcval) bool {
	b.cleanups[b.n] = fn
	b.n++
	return b.full()
}

// full returns true if the cleanup block is full.
func (b *cleanupBlock) full() bool {
	return b.n == uint32(len(b.cleanups))
}

// empty returns true if the cleanup block is empty.
func (b *cleanupBlock) empty() bool {
	return b.n == 0
}

// take moves as many cleanups as possible from b into a.
func (a *cleanupBlock) take(b *cleanupBlock) {
	dst := a.cleanups[a.n:]
	if uint32(len(dst)) >= b.n {
		// Take all.
		copy(dst, b.cleanups[:])
		a.n += b.n
		b.n = 0
	} else {
		// Partial take. Copy from the tail to avoid having
		// to move more memory around.
		copy(dst, b.cleanups[b.n-uint32(len(dst)):b.n])
		a.n = uint32(len(a.cleanups))
		b.n -= uint32(len(dst))
	}
}
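
// A worked example of take (illustrative numbers only): suppose blocks held
// 8 cleanups each, a.n == 6, and b.n == 5. Then dst has room for 2 entries,
// so take copies b.cleanups[3:5] into a.cleanups[6:8], leaving a full
// (a.n == 8) and b holding its first three cleanups (b.n == 3). Copying from
// b's tail is what lets b keep its remaining entries in place.
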

// cleanupQueue is a queue of ready-to-run cleanup functions.
type cleanupQueue struct {
	// Stack of full cleanup blocks.
	full      lfstack
	workUnits atomic.Uint64 // length of full; decrement before pop from full, increment after push to full
	_         [cpu.CacheLinePadSize - unsafe.Sizeof(lfstack(0)) - unsafe.Sizeof(atomic.Uint64{})]byte

	// Stack of free cleanup blocks.
	free lfstack

	// flushed indicates whether all local cleanupBlocks have been
	// flushed, and we're in a period of time where this condition is
	// stable (after the last sweeper, before the next sweep phase
	// begins).
	flushed atomic.Bool // Next to free because frequently accessed together.

	_ [cpu.CacheLinePadSize - unsafe.Sizeof(lfstack(0)) - 1]byte

	// Linked list of all cleanup blocks.
	all atomic.UnsafePointer // *cleanupBlock
	_   [cpu.CacheLinePadSize - unsafe.Sizeof(atomic.UnsafePointer{})]byte

	// Goroutine block state.
	lock mutex

	// sleeping is the list of sleeping cleanup goroutines.
	//
	// Protected by lock.
	sleeping gList

	// asleep is the number of cleanup goroutines sleeping.
	//
	// Read without lock, written only with the lock held.
	// When the lock is held, the lock holder may only observe
	// asleep.Load() == sleeping.n.
	//
	// To make reading without the lock safe as a signal to wake up
	// a goroutine and handle new work, it must always be greater
	// than or equal to sleeping.n. In the periods of time that it
	// is strictly greater, it may cause spurious calls to wake.
	asleep atomic.Uint32

	// running indicates the number of cleanup goroutines actively
	// executing user cleanup functions at any point in time.
	//
	// Read and written to without lock.
	running atomic.Uint32

	// ng is the number of cleanup goroutines.
	//
	// Read without lock, written only with lock held.
	ng atomic.Uint32

	// needg is the number of new cleanup goroutines that
	// need to be created.
	//
	// Read without lock, written only with lock held.
	needg atomic.Uint32

	// Cleanup queue stats.

	// queued represents a monotonic count of queued cleanups. This is sharded across
	// Ps via the field cleanupsQueued in each p, so reading just this value is insufficient.
	// In practice, this value only includes the queued count of dead Ps.
	//
	// Writes are protected by STW.
	queued uint64

	// executed is a monotonic count of executed cleanups.
	//
	// Read and updated atomically.
	executed atomic.Uint64
}

// addWork indicates that n units of parallelizable work have been added to the queue.
func (q *cleanupQueue) addWork(n int) {
	q.workUnits.Add(int64(n))
}

// tryTakeWork is an attempt to dequeue some work by a cleanup goroutine.
// This might fail if there's no work to do.
func (q *cleanupQueue) tryTakeWork() bool {
	for {
		wu := q.workUnits.Load()
		if wu == 0 {
			return false
		}
		// CAS to prevent us from going negative.
		if q.workUnits.CompareAndSwap(wu, wu-1) {
			return true
		}
	}
}
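
// The claim protocol above, reproduced as a standalone toy (an illustrative
// sketch using the public sync/atomic package, not runtime code): several
// consumers race for fewer work units than there are consumers, and the CAS
// loop guarantees exactly that many claims succeed and the counter never
// wraps below zero.
//
//	var units, claimed atomic.Uint64
//	units.Store(3)
//	var wg sync.WaitGroup
//	for range 8 {
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			for {
//				n := units.Load()
//				if n == 0 {
//					return
//				}
//				if units.CompareAndSwap(n, n-1) {
//					claimed.Add(1)
//					return
//				}
//			}
//		}()
//	}
//	wg.Wait()
//	// claimed.Load() == 3, units.Load() == 0
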

// enqueue queues a single cleanup for execution.
//
// Called by the sweeper, and only the sweeper.
func (q *cleanupQueue) enqueue(fn *funcval) {
	mp := acquirem()
	pp := mp.p.ptr()
	b := pp.cleanups
	if b == nil {
		if q.flushed.Load() {
			q.flushed.Store(false)
		}
		b = (*cleanupBlock)(q.free.pop())
		if b == nil {
			b = (*cleanupBlock)(persistentalloc(cleanupBlockSize, tagAlign, &memstats.gcMiscSys))
			for {
				next := (*cleanupBlock)(q.all.Load())
				b.alllink = next
				if q.all.CompareAndSwap(unsafe.Pointer(next), unsafe.Pointer(b)) {
					break
				}
			}
		}
		pp.cleanups = b
	}
	if full := b.enqueue(fn); full {
		q.full.push(&b.lfnode)
		pp.cleanups = nil
		q.addWork(1)
	}
	pp.cleanupsQueued++
	releasem(mp)
}

// dequeue pops a block of cleanups from the queue. Blocks until one is available
// and never returns nil.
func (q *cleanupQueue) dequeue() *cleanupBlock {
	for {
		if q.tryTakeWork() {
			// Guaranteed to be non-nil.
			return (*cleanupBlock)(q.full.pop())
		}
		lock(&q.lock)
		// Increment asleep first. We may have to undo this if we abort the sleep.
		// We must update asleep first because the scheduler might not try to wake
		// us up when work comes in between the last check of workUnits and when we
		// go to sleep. (It may see asleep as 0.) By incrementing it here, we guarantee
		// after this point that if new work comes in, someone will try to grab the
		// lock and wake us. However, this also means that if we back out, we may cause
		// someone to spuriously grab the lock and try to wake us up, only to fail.
		// This should be very rare because the window here is incredibly small: the
		// window between now and when we decrement q.asleep below.
		q.asleep.Add(1)

		// Re-check workUnits under the lock and with asleep updated. If it's still zero,
		// then no new work came in, and it's safe for us to go to sleep. If new work
		// comes in after this point, then the scheduler will notice that we're sleeping
		// and wake us up.
		if q.workUnits.Load() > 0 {
			// Undo the q.asleep update and try to take work again.
			q.asleep.Add(-1)
			unlock(&q.lock)
			continue
		}
		q.sleeping.push(getg())
		goparkunlock(&q.lock, waitReasonCleanupWait, traceBlockSystemGoroutine, 1)
	}
}
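
// What dequeue's sleep path corresponds to in ordinary Go (an illustrative
// analogy using sync.Cond; the runtime cannot depend on sync, so it builds
// the equivalent handshake from asleep, workUnits, and goparkunlock, with
// wake playing the role of Signal):
//
//	mu.Lock()
//	for work == 0 {
//		cond.Wait() // atomically releases mu and sleeps; reacquires on wake
//	}
//	work--
//	mu.Unlock()
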

// flush pushes all active cleanup blocks to the full list and wakes up cleanup
// goroutines to handle them.
//
// Must only be called at a point when we can guarantee that no more cleanups
// are being queued, such as after the final sweeper for the cycle is done
// but before the next mark phase.
func (q *cleanupQueue) flush() {
	mp := acquirem()
	flushed := 0
	emptied := 0
	missing := 0

	// Coalesce the partially-filled blocks to present a more accurate picture of demand.
	// We use the number of coalesced blocks to process as a signal for demand to create
	// new cleanup goroutines.
	var cb *cleanupBlock
	for _, pp := range allp {
		if pp == nil {
			// This function is reachable via mallocgc in the
			// middle of procresize, when allp has been resized,
			// but the new Ps not allocated yet.
			missing++
			continue
		}
		b := pp.cleanups
		if b == nil {
			missing++
			continue
		}
		pp.cleanups = nil
		if cb == nil {
			cb = b
			continue
		}
		// N.B. After take, either cb is full, b is empty, or both.
		cb.take(b)
		if cb.full() {
			q.full.push(&cb.lfnode)
			flushed++
			cb = b
			b = nil
		}
		if b != nil && b.empty() {
			q.free.push(&b.lfnode)
			emptied++
		}
	}
	if cb != nil {
		q.full.push(&cb.lfnode)
		flushed++
	}
	if flushed != 0 {
		q.addWork(flushed)
	}
	if flushed+emptied+missing != len(allp) {
		throw("failed to correctly flush all P-owned cleanup blocks")
	}
	q.flushed.Store(true)
	releasem(mp)
}

// needsWake returns true if cleanup goroutines may need to be awoken or created to handle cleanup load.
func (q *cleanupQueue) needsWake() bool {
	return q.workUnits.Load() > 0 && (q.asleep.Load() > 0 || q.ng.Load() < maxCleanupGs())
}

// wake wakes up one or more goroutines to process the cleanup queue. If there aren't
// enough sleeping goroutines to handle the demand, wake will arrange for new goroutines
// to be created.
func (q *cleanupQueue) wake() {
	lock(&q.lock)

	// Figure out how many goroutines to wake, and how many extra goroutines to create.
	// Wake one goroutine for each work unit.
	var wake, extra uint32
	work := q.workUnits.Load()
	asleep := uint64(q.asleep.Load())
	if work > asleep {
		wake = uint32(asleep)
		if work > uint64(math.MaxUint32) {
			// Protect against overflow.
			extra = math.MaxUint32
		} else {
			extra = uint32(work - asleep)
		}
	} else {
		wake = uint32(work)
		extra = 0
	}
	if extra != 0 {
		// Signal that we should create new goroutines, one for each extra work unit,
		// up to maxCleanupGs.
		newg := min(extra, maxCleanupGs()-q.ng.Load())
		if newg > 0 {
			q.needg.Add(int32(newg))
		}
	}
	if wake == 0 {
		// Nothing to do.
		unlock(&q.lock)
		return
	}

	// Take ownership of waking 'wake' goroutines.
	//
	// Nobody else will wake up these goroutines, so they're guaranteed
	// to be sitting on q.sleeping, waiting for us to wake them.
	q.asleep.Add(-int32(wake))

	// Collect them and schedule them.
	var list gList
	for range wake {
		list.push(q.sleeping.pop())
	}
	unlock(&q.lock)

	injectglist(&list)
	return
}
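
// A worked example of the wake/extra split above (illustrative numbers): with
// workUnits == 5 and asleep == 2, every sleeper is woken (wake == 2) and
// extra == 3, so up to three new goroutines are requested via needg, capped
// at maxCleanupGs() - ng. With workUnits == 1 and asleep == 3, wake == 1 and
// extra == 0: one sleeper is woken and no creation is requested.
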

func (q *cleanupQueue) needG() bool {
	have := q.ng.Load()
	if have >= maxCleanupGs() {
		return false
	}
	if have == 0 {
		// Make sure we have at least one.
		return true
	}
	return q.needg.Load() > 0
}

func (q *cleanupQueue) createGs() {
	lock(&q.lock)
	have := q.ng.Load()
	need := min(q.needg.Swap(0), maxCleanupGs()-have)
	if have == 0 && need == 0 {
		// Make sure we have at least one.
		need = 1
	}
	if need > 0 {
		q.ng.Add(int32(need))
	}
	unlock(&q.lock)

	for range need {
		go runCleanups()
	}
}

func (q *cleanupQueue) beginRunningCleanups() {
	// Update runningCleanups and running atomically with respect
	// to goroutine profiles by disabling preemption.
	mp := acquirem()
	getg().runningCleanups.Store(true)
	q.running.Add(1)
	releasem(mp)
}

func (q *cleanupQueue) endRunningCleanups() {
	// Update runningCleanups and running atomically with respect
	// to goroutine profiles by disabling preemption.
	mp := acquirem()
	getg().runningCleanups.Store(false)
	q.running.Add(-1)
	releasem(mp)
}

func (q *cleanupQueue) readQueueStats() (queued, executed uint64) {
	executed = q.executed.Load()
	queued = q.queued

	// N.B. This is inconsistent, but that's intentional. It's just an estimate.
	// Read this _after_ reading executed to decrease the chance that we observe
	// an inconsistency in the statistics (executed > queued).
	for _, pp := range allp {
		queued += pp.cleanupsQueued
	}
	return
}

func maxCleanupGs() uint32 {
	// N.B. Left as a function to make changing the policy easier.
	return uint32(max(gomaxprocs/4, 1))
}

// gcCleanups is the global cleanup queue.
var gcCleanups cleanupQueue

// runCleanups is the entrypoint for all cleanup-running goroutines.
func runCleanups() {
	for {
		b := gcCleanups.dequeue()
		if raceenabled {
			// Approximately: adds a happens-before edge between the cleanup
			// argument being mutated and the call to the cleanup below.
			racefingo()
		}

		gcCleanups.beginRunningCleanups()
		for i := 0; i < int(b.n); i++ {
			fn := b.cleanups[i]

			var racectx uintptr
			if raceenabled {
				// Enter a new race context so the race detector can catch
				// potential races between cleanups, even if they execute on
				// the same goroutine.
				//
				// Synchronize on fn. This would fail to find races on the
				// closed-over values in fn (suppose fn is passed to multiple
				// AddCleanup calls) if fn was not unique, but it is. Update
				// the synchronization on fn if you intend to optimize it
				// and store the cleanup function and cleanup argument on the
				// queue directly.
				racerelease(unsafe.Pointer(fn))
				racectx = raceEnterNewCtx()
				raceacquire(unsafe.Pointer(fn))
			}

			// Execute the next cleanup.
			cleanup := *(*func())(unsafe.Pointer(&fn))
			cleanup()
			b.cleanups[i] = nil

			if raceenabled {
				// Restore the old context.
				raceRestoreCtx(racectx)
			}
		}
		gcCleanups.endRunningCleanups()
		gcCleanups.executed.Add(int64(b.n))

		atomic.Store(&b.n, 0) // Synchronize with markroot. See comment in cleanupBlockHeader.
		gcCleanups.free.push(&b.lfnode)
	}
}

// blockUntilEmpty blocks until either the cleanup queue is emptied
// and the cleanups have been executed, or the timeout is reached.
// Returns true if the cleanup queue was emptied.
// This is used by the sync and unique tests.
func (q *cleanupQueue) blockUntilEmpty(timeout int64) bool {
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&q.lock)
		// The queue is empty when there's no work left to do *and* all the cleanup goroutines
		// are asleep. If they're not asleep, they may be actively working on a block.
		if q.flushed.Load() && q.full.empty() && uint32(q.sleeping.size) == q.ng.Load() {
			unlock(&q.lock)
			return true
		}
		unlock(&q.lock)
		Gosched()
	}
	return false
}

//go:linkname unique_runtime_blockUntilEmptyCleanupQueue unique.runtime_blockUntilEmptyCleanupQueue
func unique_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}

//go:linkname sync_test_runtime_blockUntilEmptyCleanupQueue sync_test.runtime_blockUntilEmptyCleanupQueue
func sync_test_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}
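
// An illustrative sketch (hypothetical test-side code, not part of the
// runtime) of how a linknamed wrapper like the above is typically used to
// make cleanup execution deterministic in a test:
//
//	runtime.GC() // trigger a cycle so unreachable objects' cleanups get queued
//	if !runtime_blockUntilEmptyCleanupQueue(int64(time.Second)) {
//		t.Fatal("cleanup queue did not drain within 1s")
//	}
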

// raceEnterNewCtx creates a new racectx and switches the current
// goroutine to it. Returns the old racectx.
//
// Must be running on a user goroutine. nosplit to match other race
// instrumentation.
//
//go:nosplit
func raceEnterNewCtx() uintptr {
	// We use the existing ctx as the spawn context, but gp.gopc
	// as the spawn PC to make the error output a little nicer
	// (pointing to AddCleanup, where the goroutines are created).
	//
	// We also need to carefully indicate to the race detector
	// that the goroutine stack will only be accessed by the new
	// race context, to avoid false positives on stack locations.
	// We do this by marking the stack as free in the first context
	// and then re-marking it as allocated in the second. Crucially,
	// there must be (1) no race operations and (2) no stack changes
	// in between. (1) is easy to avoid because we're in the runtime
	// so there's no implicit race instrumentation. To avoid (2) we
	// defensively become non-preemptible so the GC can't stop us,
	// and rely on the fact that racemalloc, racefree, and racectxstart
	// are nosplit.
	mp := acquirem()
	gp := getg()
	ctx := getg().racectx
	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	getg().racectx = racectxstart(gp.gopc, ctx)
	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	releasem(mp)
	return ctx
}

// raceRestoreCtx restores ctx on the goroutine. It is the inverse of
// raceEnterNewCtx and must be called with its result.
//
// Must be running on a user goroutine. nosplit to match other race
// instrumentation.
//
//go:nosplit
func raceRestoreCtx(ctx uintptr) {
	mp := acquirem()
	gp := getg()
	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	racectxend(getg().racectx)
	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	getg().racectx = ctx
	releasem(mp)
}