Source file
src/runtime/traceallocfree.go
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file.

// Runtime -> tracer API for memory events.

package runtime

import (
	"internal/abi"
	"internal/runtime/sys"
)

// Batch type values for the alloc/free experiment.
const (
	traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...]
	traceAllocFreeInfoBatch         // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
)

// traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
// (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
//
// The world must be stopped and tracing must be enabled when this function is called.
func traceSnapshotMemory(gen uintptr) {
	assertWorldStopped()

	// Write a batch containing information that'll be needed to
	// interpret the events.
	var flushed bool
	w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree)
	w, flushed = w.ensure(1 + 4*traceBytesPerNumber)
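	// ensure may have started a new batch, in which case the batch must be
	// tagged with its contents before anything else is written to it.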
	if flushed {
		// Annotate the batch as containing additional info.
		w.byte(byte(traceAllocFreeInfoBatch))
	}

	// Emit info.
	w.varint(uint64(trace.minPageHeapAddr))
	w.varint(uint64(pageSize))
	w.varint(uint64(minHeapAlign))
	w.varint(uint64(fixedStack))

	// Finish writing the batch.
	w.flush().end()

	// Start tracing.
	trace := traceAcquire()
	if !trace.ok() {
		throw("traceSnapshotMemory: tracing is not enabled")
	}

	// Write out all the heap spans and heap objects.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanDead {
			continue
		}
		// It's some kind of span, so trace that it exists.
		trace.SpanExists(s)

		// Write out allocated objects if it's a heap span.
		if s.state.get() != mSpanInUse {
			continue
		}

		// Find all allocated objects.
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < uintptr(s.nelems); i++ {
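			// The object at index i is allocated if it lies below freeindex
			// (everything below freeindex is allocated) or if its alloc bit is set.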
			if abits.index < uintptr(s.freeindex) || abits.isMarked() {
				x := s.base() + i*s.elemsize
				trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ)
			}
			abits.advance()
		}
	}

	// Write out all the goroutine stacks.
	forEachGRace(func(gp *g) {
		trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
	})
	traceRelease(trace)
}
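// traceSpanTypeAndClass encodes both a span's state and its size class in one
// trace argument: an in-use heap span reports its span class shifted left by
// one (low bit 0), while any other live span state is reported as 1.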
func traceSpanTypeAndClass(s *mspan) traceArg {
	if s.state.get() == mSpanInUse {
		return traceArg(s.spanclass) << 1
	}
	return traceArg(1)
}

// SpanExists records an event indicating that the span exists.
func (tl traceLocker) SpanExists(s *mspan) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}

// SpanAlloc records an event indicating that the span has just been allocated.
func (tl traceLocker) SpanAlloc(s *mspan) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}

// SpanFree records an event indicating that the span is about to be freed.
func (tl traceLocker) SpanFree(s *mspan) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanFree, traceSpanID(s))
}

// traceSpanID creates a trace ID for the span s.
func traceSpanID(s *mspan) traceArg {
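	// Span bases are always page-aligned, so the offset from the minimum heap
	// address divided by the page size yields a compact, unique ID.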
	return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
}

// HeapObjectExists records that an object already exists at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from
// the span containing it.
func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
}

// HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from
// the span containing it.
func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
}

// HeapObjectFree records that an object at addr is about to be freed.
func (tl traceLocker) HeapObjectFree(addr uintptr) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectFree, traceHeapObjectID(addr))
}

// traceHeapObjectID creates a trace ID for a heap object at address addr.
func traceHeapObjectID(addr uintptr) traceArg {
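	// Heap objects are at least minHeapAlign-aligned, so dividing the offset
	// from the minimum heap address by minHeapAlign yields a compact, unique ID.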
	return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign
}

// GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
	order := traceCompressStackSize(size)
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStack, traceGoroutineStackID(base), order)
}

// GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.
func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
	order := traceCompressStackSize(size)
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), order)
}

// GoroutineStackFree records that a goroutine stack at address base is about to be freed.
func (tl traceLocker) GoroutineStackFree(base uintptr) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackFree, traceGoroutineStackID(base))
}

// traceGoroutineStackID creates a trace ID for a goroutine stack from its base address.
func traceGoroutineStackID(base uintptr) traceArg {
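	// Stack bases are aligned to at least fixedStack, the minimum stack size,
	// so this yields a compact, unique ID.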
	return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack
}

// traceCompressStackSize compresses a goroutine stack size, which must be a
// power of 2, into a small integer for the trace.
func traceCompressStackSize(size uintptr) traceArg {
	if size&(size-1) != 0 {
		throw("goroutine stack size is not a power of 2")
	}
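	// size is a power of 2, so its bit length is log2(size)+1. Storing that
	// instead of the size itself keeps the trace argument small.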
	return traceArg(sys.Len64(uint64(size)))
}