Source file src/runtime/mbarrier.go

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: write barriers.
     6  //
     7  // For the concurrent garbage collector, the Go compiler implements
     8  // updates to pointer-valued fields that may be in heap objects by
     9  // emitting calls to write barriers. The main write barrier for
    10  // individual pointer writes is gcWriteBarrier and is implemented in
    11  // assembly. This file contains write barrier entry points for bulk
    12  // operations. See also mwbbuf.go.
    13  
    14  package runtime
    15  
    16  import (
    17  	"internal/abi"
    18  	"internal/goarch"
    19  	"internal/goexperiment"
    20  	"internal/runtime/sys"
    21  	"unsafe"
    22  )
    23  
    24  // Go uses a hybrid barrier that combines a Yuasa-style deletion
    25  // barrier—which shades the object whose reference is being
     26  // overwritten—with a Dijkstra-style insertion barrier—which shades the object
    27  // whose reference is being written. The insertion part of the barrier
    28  // is necessary while the calling goroutine's stack is grey. In
    29  // pseudocode, the barrier is:
    30  //
    31  //     writePointer(slot, ptr):
    32  //         shade(*slot)
    33  //         if current stack is grey:
    34  //             shade(ptr)
    35  //         *slot = ptr
    36  //
    37  // slot is the destination in Go code.
    38  // ptr is the value that goes into the slot in Go code.
    39  //
    40  // Shade indicates that it has seen a white pointer by adding the referent
    41  // to wbuf as well as marking it.
    42  //
    43  // The two shades and the condition work together to prevent a mutator
    44  // from hiding an object from the garbage collector:
    45  //
    46  // 1. shade(*slot) prevents a mutator from hiding an object by moving
    47  // the sole pointer to it from the heap to its stack. If it attempts
    48  // to unlink an object from the heap, this will shade it.
    49  //
    50  // 2. shade(ptr) prevents a mutator from hiding an object by moving
    51  // the sole pointer to it from its stack into a black object in the
    52  // heap. If it attempts to install the pointer into a black object,
    53  // this will shade it.
    54  //
    55  // 3. Once a goroutine's stack is black, the shade(ptr) becomes
    56  // unnecessary. shade(ptr) prevents hiding an object by moving it from
    57  // the stack to the heap, but this requires first having a pointer
    58  // hidden on the stack. Immediately after a stack is scanned, it only
    59  // points to shaded objects, so it's not hiding anything, and the
    60  // shade(*slot) prevents it from hiding any other pointers on its
    61  // stack.
    62  //
    63  // For a detailed description of this barrier and proof of
    64  // correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
    65  //
    66  //
    67  //
    68  // Dealing with memory ordering:
    69  //
    70  // Both the Yuasa and Dijkstra barriers can be made conditional on the
    71  // color of the object containing the slot. We chose not to make these
    72  // conditional because the cost of ensuring that the object holding
    73  // the slot doesn't concurrently change color without the mutator
    74  // noticing seems prohibitive.
    75  //
    76  // Consider the following example where the mutator writes into
    77  // a slot and then loads the slot's mark bit while the GC thread
    78  // writes to the slot's mark bit and then as part of scanning reads
    79  // the slot.
    80  //
    81  // Initially both [slot] and [slotmark] are 0 (nil)
    82  // Mutator thread          GC thread
    83  // st [slot], ptr          st [slotmark], 1
    84  //
    85  // ld r1, [slotmark]       ld r2, [slot]
    86  //
    87  // Without an expensive memory barrier between the st and the ld, the final
    88  // result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
    89  // example of what can happen when loads are allowed to be reordered with older
    90  // stores (avoiding such reorderings lies at the heart of the classic
    91  // Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
    92  // barriers, which will slow down both the mutator and the GC, we always grey
    93  // the ptr object regardless of the slot's color.
    94  //
    95  //
    96  // Stack writes:
    97  //
    98  // The compiler omits write barriers for writes to the current frame,
    99  // but if a stack pointer has been passed down the call stack, the
   100  // compiler will generate a write barrier for writes through that
   101  // pointer (because it doesn't know it's not a heap pointer).
   102  //
   103  //
   104  // Global writes:
   105  //
   106  // The Go garbage collector requires write barriers when heap pointers
   107  // are stored in globals. Many garbage collectors ignore writes to
   108  // globals and instead pick up global -> heap pointers during
   109  // termination. This increases pause time, so we instead rely on write
   110  // barriers for writes to globals so that we don't have to rescan
    111  // globals during mark termination.
   112  //
   113  //
   114  // Publication ordering:
   115  //
   116  // The write barrier is *pre-publication*, meaning that the write
   117  // barrier happens prior to the *slot = ptr write that may make ptr
   118  // reachable by some goroutine that currently cannot reach it.
   119  //
   120  //
   121  // Signal handler pointer writes:
   122  //
   123  // In general, the signal handler cannot safely invoke the write
   124  // barrier because it may run without a P or even during the write
   125  // barrier.
   126  //
   127  // There is exactly one exception: profbuf.go omits a barrier during
   128  // signal handler profile logging. That's safe only because of the
   129  // deletion barrier. See profbuf.go for a detailed argument. If we
   130  // remove the deletion barrier, we'll have to work out a new way to
   131  // handle the profile logging.
   132  
// typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
// typedmemmove should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname typedmemmove
//go:nosplit
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
	if dst == src {
		// Self-copy: no memory changes, so no barrier or copy needed.
		return
	}
	if writeBarrier.enabled && typ.Pointers() {
		// This always copies a full value of type typ so it's safe
		// to pass typ along as an optimization. See the comment on
		// bulkBarrierPreWrite.
		// Only the first typ.PtrBytes bytes of the value can contain
		// pointers, so the barrier scan stops there.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
	}
	// There's a race here: if some other goroutine can write to
	// src, it may change some pointer in src after we've
	// performed the write barrier but before we perform the
	// memory copy. This is safe because the write performed by that
	// other goroutine must also be accompanied by a write
	// barrier, so at worst we've unnecessarily greyed the old
	// pointer that was in src.
	memmove(dst, src, typ.Size_)
	if goexperiment.CgoCheck2 {
		cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
	}
}
   172  
// wbZero performs the write barrier operations necessary before
// zeroing a region of memory at address dst of type typ.
// Does not actually do the zeroing.
//
//go:nowritebarrierrec
//go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer) {
	// This always copies a full value of type typ so it's safe
	// to pass typ along as an optimization. See the comment on
	// bulkBarrierPreWrite.
	// A src of 0 means there is no source memory to scan for new
	// pointers; only the old pointers in dst are shaded. See
	// bulkBarrierPreWrite.
	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes, typ)
}
   185  
// wbMove performs the write barrier operations necessary before
// copying a region of memory from src to dst of type typ.
// Does not actually do the copying.
//
//go:nowritebarrierrec
//go:nosplit
func wbMove(typ *_type, dst, src unsafe.Pointer) {
	// This always copies a full value of type typ so it's safe to
	// pass a type here.
	//
	// See the comment on bulkBarrierPreWrite.
	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
}
   199  
// reflect_typedmemmove is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/ugorji/go/codec
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	// This entry point is reached from outside the runtime, so it
	// performs race/msan/asan instrumentation itself before
	// delegating to typedmemmove.
	if raceenabled {
		raceWriteObjectPC(typ, dst, sys.GetCallerPC(), abi.FuncPCABIInternal(reflect_typedmemmove))
		raceReadObjectPC(typ, src, sys.GetCallerPC(), abi.FuncPCABIInternal(reflect_typedmemmove))
	}
	if msanenabled {
		msanwrite(dst, typ.Size_)
		msanread(src, typ.Size_)
	}
	if asanenabled {
		asanwrite(dst, typ.Size_)
		asanread(src, typ.Size_)
	}
	typedmemmove(typ, dst, src)
}
   228  
// reflectlite_typedmemmove is the internal/reflectlite analogue of
// reflect_typedmemmove; it delegates to the same instrumented path.
//
//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	reflect_typedmemmove(typ, dst, src)
}
   233  
// maps_typedmemmove exposes typedmemmove to internal/runtime/maps
// via linkname.
//
//go:linkname maps_typedmemmove internal/runtime/maps.typedmemmove
func maps_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	typedmemmove(typ, dst, src)
}
   238  
// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
	// A region smaller than one pointer cannot hold a pointer, hence
	// the size >= goarch.PtrSize guard.
	if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize {
		// Pass nil for the type. dst does not point to value of type typ,
		// but rather points into one, so applying the optimization is not
		// safe. See the comment on this function.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, nil)
	}
	memmove(dst, src, size)

	// Move pointers returned in registers to a place where the GC can see them.
	for i := range regs.Ints {
		if regs.ReturnIsPtr.Get(i) {
			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
		}
	}
}
   265  
// typedslicecopy copies n = min(dstLen, srcLen) elements of type typ
// from srcPtr to dstPtr, performing the necessary write barriers, and
// reports n.
//
// typedslicecopy should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/segmentio/encoding
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname typedslicecopy
//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
	n := dstLen
	if n > srcLen {
		n = srcLen
	}
	if n == 0 {
		return 0
	}

	// The compiler emits calls to typedslicecopy before
	// instrumentation runs, so unlike the other copying and
	// assignment operations, it's not instrumented in the calling
	// code and needs its own instrumentation.
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(slicecopy)
		racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
		racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstPtr, uintptr(n)*typ.Size_)
		msanread(srcPtr, uintptr(n)*typ.Size_)
	}
	if asanenabled {
		asanwrite(dstPtr, uintptr(n)*typ.Size_)
		asanread(srcPtr, uintptr(n)*typ.Size_)
	}

	if goexperiment.CgoCheck2 {
		cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
	}

	if dstPtr == srcPtr {
		// Self-copy: the instrumentation above still ran, but there
		// is no memory to change.
		return n
	}

	// Note: No point in checking typ.PtrBytes here:
	// compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	size := uintptr(n) * typ.Size_
	if writeBarrier.enabled {
		// This always copies one or more full values of type typ so
		// it's safe to pass typ along as an optimization. See the comment on
		// bulkBarrierPreWrite.
		// Only the first typ.PtrBytes bytes of the final element can
		// contain pointers, so the barrier scan stops there rather
		// than covering all size bytes.
		pwsize := size - typ.Size_ + typ.PtrBytes
		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize, typ)
	}
	// See typedmemmove for a discussion of the race between the
	// barrier and memmove.
	memmove(dstPtr, srcPtr, size)
	return n
}
   329  
// reflect_typedslicecopy is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if !elemType.Pointers() {
		// Pointer-free elements: a plain copy with no write barriers.
		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
	}
	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}
   349  
// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
	if writeBarrier.enabled && typ.Pointers() {
		// This always clears a whole value of type typ, so it's
		// safe to pass a type here and apply the optimization.
		// See the comment on bulkBarrierPreWrite.
		// A src of 0 means only the old pointers in ptr are shaded.
		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes, typ)
	}
	memclrNoHeapPointers(ptr, typ.Size_)
}
   370  
// reflect_typedmemclr is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}
   383  
// maps_typedmemclr exposes typedmemclr to internal/runtime/maps
// via linkname.
//
//go:linkname maps_typedmemclr internal/runtime/maps.typedmemclr
func maps_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}
   388  
// reflect_typedmemclrpartial is like typedmemclr but clears only size
// bytes at offset off within a value of type typ, for package reflect.
//
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
	if writeBarrier.enabled && typ.Pointers() {
		// Pass nil for the type. ptr does not point to value of type typ,
		// but rather points into one so it's not safe to apply the optimization.
		// See the comment on this function in the reflect package and the
		// comment on bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(ptr), 0, size, nil)
	}
	memclrNoHeapPointers(ptr, size)
}
   400  
// reflect_typedarrayclear clears len contiguous elements of type typ
// starting at ptr, with write barriers, for package reflect.
//
//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
	size := typ.Size_ * uintptr(len)
	if writeBarrier.enabled && typ.Pointers() {
		// This always clears whole elements of an array, so it's
		// safe to pass a type here. See the comment on bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(ptr), 0, size, typ)
	}
	memclrNoHeapPointers(ptr, size)
}
   411  
// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
//
// memclrHasPointers should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname memclrHasPointers
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
	// Pass nil for the type since we don't have one here anyway.
	// Without a type the barrier must conservatively scan all n bytes.
	bulkBarrierPreWrite(uintptr(ptr), 0, n, nil)
	memclrNoHeapPointers(ptr, n)
}
   432  

View as plain text