Source file src/runtime/mcleanup.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"unsafe"
)

// AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer
// reachable, the runtime will call cleanup(arg) in a separate goroutine.
//
// A typical use is that ptr is an object wrapping an underlying resource (e.g.,
// a File object wrapping an OS file descriptor), arg is the underlying resource
// (e.g., the OS file descriptor), and the cleanup function releases the underlying
// resource (e.g., by calling the close system call).
//
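// As a minimal sketch of that pattern (the File type, its fd field, and the
// use of syscall.Close here are illustrative, not part of this API):
//
//	type File struct{ fd int }
//
//	func NewFile(fd int) *File {
//		f := &File{fd: fd}
//		// Close the descriptor once f is no longer reachable.
//		runtime.AddCleanup(f, func(fd int) { syscall.Close(fd) }, fd)
//		return f
//	}
//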
// There are few constraints on ptr. In particular, multiple cleanups may be
// attached to the same pointer, or to different pointers within the same
// allocation.
//
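// For instance, both of the following are valid on one allocation (again a
// sketch; the fd and name fields of f are assumed for illustration):
//
//	runtime.AddCleanup(f, func(fd int) { syscall.Close(fd) }, f.fd)
//	runtime.AddCleanup(&f.fd, func(name string) { println("released", name) }, f.name)
//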
// If ptr is reachable from cleanup or arg, ptr will never be collected
// and the cleanup will never run. As a protection against simple cases of this,
// AddCleanup panics if arg is equal to ptr.
//
// There is no specified order in which cleanups will run.
// In particular, if several objects point to each other and all become
// unreachable at the same time, their cleanups all become eligible to run
// and can run in any order. This is true even if the objects form a cycle.
//
// A single goroutine runs all cleanup calls for a program, sequentially. If a
// cleanup function must run for a long time, it should create a new goroutine.
//
// If ptr has both a cleanup and a finalizer, the cleanup will only run once
// it has been finalized and becomes unreachable without an associated finalizer.
//
// The cleanup(arg) call is not always guaranteed to run; in particular it is not
// guaranteed to run before program exit.
//
// Cleanups are not guaranteed to run if the size of T is zero bytes, because
// it may share the same address with other zero-size objects in memory. See
// https://go.dev/ref/spec#Size_and_alignment_guarantees.
//
// It is not guaranteed that a cleanup will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// Note that because cleanups may execute arbitrarily far into the future
// after an object is no longer referenced, the runtime is allowed to perform
// a space-saving optimization that batches objects together in a single
// allocation slot. The cleanup for an unreferenced object in such an
// allocation may never run if it always exists in the same batch as a
// referenced object. Typically, this batching only happens for tiny
// (on the order of 16 bytes or less) and pointer-free objects.
//
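// A sketch of the kind of allocation this caveat applies to (whether the
// runtime actually batches any particular object is unspecified):
//
//	small := new([8]byte) // tiny and pointer-free, so it may share an allocation slot
//	runtime.AddCleanup(small, func(struct{}) { println("not guaranteed to run") }, struct{}{})
//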
// A cleanup may run as soon as an object becomes unreachable.
// In order to use cleanups correctly, the program must ensure that
// the object is reachable until it is safe to run its cleanup.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. A function argument or
// receiver may become unreachable at the last point where the function
// mentions it. To ensure a cleanup does not get called prematurely,
// pass the object to the [KeepAlive] function after the last point
// where the object must remain reachable.
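//
// For example, a sketch of keeping the wrapper alive while an operation on the
// resource it wraps is still in flight (Read and the fd field are assumed parts
// of the caller's own type, not of this package):
//
//	func (f *File) Read(buf []byte) (int, error) {
//		n, err := syscall.Read(f.fd, buf)
//		// Without this, f could become unreachable, and its cleanup could
//		// close f.fd, as soon as f.fd was read above.
//		runtime.KeepAlive(f)
//		return n, err
//	}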
func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
	// Explicitly force ptr to escape to the heap.
	ptr = abi.Escape(ptr)

	// The pointer to the object must be valid.
	if ptr == nil {
		throw("runtime.AddCleanup: ptr is nil")
	}
	usptr := uintptr(unsafe.Pointer(ptr))

	// Check that arg is not equal to ptr.
	// TODO(67535) this does not cover the case where T and *S are the same
	// type and ptr and arg are equal.
	if unsafe.Pointer(&arg) == unsafe.Pointer(ptr) {
		throw("runtime.AddCleanup: ptr is equal to arg, cleanup will never run")
	}
	if inUserArenaChunk(usptr) {
		// Arena-allocated objects are not eligible for cleanup.
		throw("runtime.AddCleanup: ptr is arena-allocated")
	}
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no cleanup will ever run
		// (and we don't have the data structures to record them).
		// Return a noop cleanup.
		return Cleanup{}
	}

	fn := func() {
		cleanup(arg)
	}
	// The closure must escape to the heap: a pointer to it is kept in the
	// cleanup special record well after this call returns.
	fv := *(**funcval)(unsafe.Pointer(&fn))
	fv = abi.Escape(fv)

	// Find the containing object.
	base, _, _ := findObject(usptr, 0, 0)
	if base == 0 {
		if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
			// Cleanup is a noop.
			return Cleanup{}
		}
		throw("runtime.AddCleanup: ptr not in allocated block")
	}

	// Ensure we have a finalizer processing goroutine running.
	createfing()

	id := addCleanup(unsafe.Pointer(ptr), fv)
	return Cleanup{
		id:  id,
		ptr: usptr,
	}
}

// Cleanup is a handle to a cleanup call for a specific object.
type Cleanup struct {
	// id is the unique identifier for the cleanup call.
	id uint64
	// ptr contains the pointer to the object.
	ptr uintptr
}

// Stop cancels the cleanup call. Stop will have no effect if the cleanup call
// has already been queued for execution (because ptr became unreachable).
// To guarantee that Stop removes the cleanup function, the caller must ensure
// that the pointer that was passed to AddCleanup is reachable across the call to Stop.
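//
// A sketch of cancelling the cleanup when the resource is released explicitly
// (the Close method, and the cleanup and fd fields, are assumed to belong to
// the caller's own wrapper type):
//
//	func (f *File) Close() error {
//		// The descriptor is being closed here, so the cleanup must not
//		// close it again later.
//		f.cleanup.Stop()
//		err := syscall.Close(f.fd)
//		// Keep f reachable across Stop, as described above.
//		runtime.KeepAlive(f)
//		return err
//	}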
func (c Cleanup) Stop() {
	if c.id == 0 {
		// id is set to zero when the cleanup is a noop.
		return
	}

	// The following block removes the Special record of type cleanup for the object c.ptr.
	span := spanOfHeap(uintptr(unsafe.Pointer(c.ptr)))
	if span == nil {
		return
	}
	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(unsafe.Pointer(c.ptr)) - span.base()

	var found *special
	lock(&span.speciallock)

	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
	if exists {
		for {
			s := *iter
			if s == nil {
				// Reached the end of the linked list. Stop searching at this point.
				break
			}
			if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
				(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
				// The special is a cleanup and contains a matching cleanup id.
				*iter = s.next
				found = s
				break
			}
			if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
				// The special is outside the region specified for that kind of
				// special. The specials are sorted by kind.
				break
			}
			// Try the next special.
			iter = &s.next
		}
	}
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)

	if found == nil {
		return
	}
	lock(&mheap_.speciallock)
	mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
	unlock(&mheap_.speciallock)
}
