src/runtime/heapdump.go
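// Implementation of runtime/debug.WriteHeapDump. Writes all objects in the
// heap plus additional info (finalizers, goroutines and their stacks, memory
// statistics, and memory profile records) to the given file descriptor.
//
// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.
//
// A minimal usage sketch from user code (the file name is an arbitrary
// example):
//
//	f, _ := os.Create("/tmp/heapdump")
//	debug.WriteHeapDump(f.Fd())
//	f.Close()
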
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stw := stopTheWorld(stwWriteHeapDump)

	// Collect a MemStats snapshot to include in the dump.
	var m MemStats
	systemstack(func() {
		// Read the memory stats and write the dump on the system stack
		// so that this goroutine's own stack is not in use while it is
		// being dumped.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld(stw)
}
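
// A heap dump is the header "go1.7 heap dump\n" followed by a sequence of
// records and a terminating tagEOF record. Each record begins with one of
// the tag* values below. Integers are written as unsigned varints
// (7 bits per byte, high bit set if more bytes follow; see dumpint), and
// strings and memory ranges are written as a length followed by the raw bytes.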
const (
	fieldKindEol   = 0
	fieldKindPtr   = 1
	fieldKindIface = 2
	fieldKindEface = 3

	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)

var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr

// dwrite appends len bytes starting at data to the output buffer,
// flushing the buffer to dumpfd whenever it fills up.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary.
func dumpint(v uint64) {
	var buf [10]byte // 10 bytes is enough for a 64-bit value, 7 bits at a time.
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80)
		n++
		v >>= 7
	}
	buf[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}
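
// For example, dumpint(300) emits the two bytes 0xac 0x02:
// 300 = 0b1_0010_1100, so the low seven bits 0101100 go out first with the
// continuation bit set (0xac), followed by the remaining bits 10 (0x02).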

func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

func dumpstr(s string) {
	dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

// dump information for a type.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.Hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front.
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we've dumped it.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.Size_))
	rt := toRType(t)
	if x := t.Uncommon(); x == nil || rt.nameOff(x.PkgPath).Name() == "" {
		dumpstr(rt.string())
	} else {
		pkgpath := rt.nameOff(x.PkgPath).Name()
		name := rt.name()
		dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name))))
		dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath)))
		dwritebyte('.')
		dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
	}
	dumpbool(t.Kind_&abi.KindDirectIface == 0 || t.PtrBytes != 0)
}

// dump an object.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}

func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}
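
// During the stack walk in dumpgoroutine, frames are visited from innermost
// to outermost. Each frame records in childInfo where its incoming arguments
// live and their pointer bitmap; its caller, visited next, uses that
// information to report the pointers in the outgoing-argument area of its
// own frame.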

// dump kinds & offsets of interesting fields in bv.
func dumpbv(cbv *bitvector, offset uintptr) {
	for i := uintptr(0); i < uintptr(cbv.n); i++ {
		if cbv.ptrbit(i) == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*goarch.PtrSize))
		}
	}
}

func dumpframe(s *stkframe, child *childInfo) {
	f := s.fn

	// Figure out what we can about our stack map.
	pc := s.pc
	pcdata := int32(-1)
	if pc != f.entry() {
		pc--
		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		bv.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry()))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section.
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section.
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.argBytes()
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return
}

func dumpgoroutine(gp *g) {
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(gp.goid)
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	var u unwinder
	for u.initAt(pc, sp, lr, gp, 0); u.valid(); u.next() {
		dumpframe(&u.frame, &child)
	}

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		fn := *(**funcval)(unsafe.Pointer(&d.fn))
		dumpint(uint64(uintptr(unsafe.Pointer(fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers.
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(eface.data)))
		dumpint(0)
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}

func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func(gp *g) {
		status := readgstatus(gp)
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	})
}

func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// Note: only firstmoduledata's data and bss segments are dumped.
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// Finalizers registered on heap objects.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizer specials for this span.
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue.
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool

func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		// Mark the free slots in this span so they are skipped below.
		for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(uintptr(freeIndex)) {
				freemark[freeIndex] = true
			}
		}

		// Dump every live (not free) object in the span.
		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}

// dumpparams writes the tagParams record: pointer endianness and size,
// the address range covered by heap arenas, GOARCH, the Go version,
// and the number of CPUs.
func dumpparams() {
	dumpint(tagParams)
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(ncpu))
}

func itab_callback(tab *itab) {
	t := tab.Type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}

func dumpitabs() {
	iterate_itabs(itab_callback)
}

func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}

// dumpmemstats writes the tagMemStats record from the MemStats snapshot
// taken before the dump started.
func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the
	// same way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}

func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			// No function information: format the PC itself as "(0x...)".
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry() {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}

func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

var dumphdr = []byte("go1.7 heap dump\n")

func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}

func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	gp := getg()
	casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(gp.m.curg, _Gwaiting, _Grunning)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}
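
// makeheapobjbv returns a bitvector with one bit per pointer-sized word of
// the object at p, set for words that the heap type metadata records as
// containing pointers. The bits are built in tmpbuf, which is grown with
// sysAlloc as needed and reused across calls.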
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / goarch.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	clear(tmpbuf[:nptr/8+1])
	s := spanOf(p)
	tp := s.typePointersOf(p, size)
	for {
		var addr uintptr
		if tp, addr = tp.next(p + size); addr == 0 {
			break
		}
		i := (addr - p) / goarch.PtrSize
		tmpbuf[i/8] |= 1 << (i % 8)
	}
	return bitvector{int32(nptr), &tmpbuf[0]}
}