Source file src/runtime/traceallocfree.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Runtime -> tracer API for memory events.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/runtime/gc"
    12  	"internal/runtime/sys"
    13  	"internal/trace/tracev2"
    14  )
    15  
    16  // Batch type values for the alloc/free experiment.
// Batch type values for the alloc/free experiment.
//
// One of these values is written as the first byte of an experimental
// batch (see traceSnapshotMemory) to identify the layout of the batch
// body that follows.
const (
	traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...]
	traceAllocFreeInfoBatch         // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
)
    21  
// traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
// (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
//
// The world must be stopped and tracing must be enabled when this function is called.
func traceSnapshotMemory(gen uintptr) {
	assertWorldStopped()

	// Write a batch containing information that'll be necessary to
	// interpret the events.
	var flushed bool
	w := unsafeTraceExpWriter(gen, nil, tracev2.AllocFree)
	w, flushed = w.ensure(1 + 4*traceBytesPerNumber)
	if flushed {
		// ensure started a new batch (w was created with a nil buffer
		// above, so this is presumably always the case here). Annotate
		// the batch as containing additional info.
		w.byte(byte(traceAllocFreeInfoBatch))
	}

	// Emit info. The order must match the layout documented on
	// traceAllocFreeInfoBatch: [min heap addr, page size, min heap
	// align, min stack align].
	w.varint(uint64(trace.minPageHeapAddr))
	w.varint(uint64(pageSize))
	w.varint(uint64(gc.MinHeapAlign))
	w.varint(uint64(fixedStack))

	// Finish writing the batch.
	w.flush().end()

	// Start tracing. NOTE: this local deliberately shadows the
	// package-level trace variable for the rest of the function.
	trace := traceAcquire()
	if !trace.ok() {
		throw("traceSnapshotMemory: tracing is not enabled")
	}

	// Write out all the heap spans and heap objects.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanDead {
			continue
		}
		// It's some kind of span, so trace that it exists.
		trace.SpanExists(s)

		// Write out allocated objects if it's a heap span.
		if s.state.get() != mSpanInUse {
			continue
		}

		// Find all allocated objects: a slot counts as allocated if its
		// index is below freeindex or its alloc bit is marked.
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < uintptr(s.nelems); i++ {
			if abits.index < uintptr(s.freeindex) || abits.isMarked() {
				x := s.base() + i*s.elemsize
				trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ)
			}
			abits.advance()
		}
	}

	// Write out all the goroutine stacks.
	forEachGRace(func(gp *g) {
		trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
	})
	traceRelease(trace)
}
    84  
    85  func traceSpanTypeAndClass(s *mspan) traceArg {
    86  	if s.state.get() == mSpanInUse {
    87  		return traceArg(s.spanclass) << 1
    88  	}
    89  	return traceArg(1)
    90  }
    91  
    92  // SpanExists records an event indicating that the span exists.
    93  func (tl traceLocker) SpanExists(s *mspan) {
    94  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
    95  }
    96  
    97  // SpanAlloc records an event indicating that the span has just been allocated.
    98  func (tl traceLocker) SpanAlloc(s *mspan) {
    99  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
   100  }
   101  
   102  // SpanFree records an event indicating that the span is about to be freed.
   103  func (tl traceLocker) SpanFree(s *mspan) {
   104  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSpanFree, traceSpanID(s))
   105  }
   106  
   107  // traceSpanID creates a trace ID for the span s for the trace.
   108  func traceSpanID(s *mspan) traceArg {
   109  	return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
   110  }
   111  
   112  // HeapObjectExists records that an object already exists at addr with the provided type.
   113  // The type is optional, and the size of the slot occupied the object is inferred from the
   114  // span containing it.
   115  func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
   116  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
   117  }
   118  
   119  // HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
   120  // The type is optional, and the size of the slot occupied the object is inferred from the
   121  // span containing it.
   122  func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
   123  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
   124  }
   125  
   126  // HeapObjectFree records that an object at addr is about to be freed.
   127  func (tl traceLocker) HeapObjectFree(addr uintptr) {
   128  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapObjectFree, traceHeapObjectID(addr))
   129  }
   130  
   131  // traceHeapObjectID creates a trace ID for a heap object at address addr.
   132  func traceHeapObjectID(addr uintptr) traceArg {
   133  	return traceArg(uint64(addr)-trace.minPageHeapAddr) / gc.MinHeapAlign
   134  }
   135  
   136  // GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
   137  func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
   138  	order := traceCompressStackSize(size)
   139  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoroutineStack, traceGoroutineStackID(base), order)
   140  }
   141  
   142  // GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size..
   143  func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
   144  	order := traceCompressStackSize(size)
   145  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoroutineStackAlloc, traceGoroutineStackID(base), order)
   146  }
   147  
   148  // GoroutineStackFree records that a goroutine stack at address base is about to be freed.
   149  func (tl traceLocker) GoroutineStackFree(base uintptr) {
   150  	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoroutineStackFree, traceGoroutineStackID(base))
   151  }
   152  
   153  // traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
   154  func traceGoroutineStackID(base uintptr) traceArg {
   155  	return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack
   156  }
   157  
// traceCompressStackSize assumes size is a power of 2 and returns log2(size).
//
// NOTE(review): sys.Len64(1<<k) is k+1, so for a power-of-two size this
// actually returns log2(size)+1, not log2(size). Confirm the trace consumer
// applies the matching inverse before changing either side.
func traceCompressStackSize(size uintptr) traceArg {
	// A power of two has exactly one bit set, so size&(size-1) must be
	// zero. (size == 0 also passes this check.)
	if size&(size-1) != 0 {
		throw("goroutine stack size is not a power of 2")
	}
	// Bit length of size: one more than the index of the highest set bit.
	return traceArg(sys.Len64(uint64(size)))
}
   165  

View as plain text