Source file: src/runtime/heapdump.go

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.
//
// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/gc"
	"unsafe"
)

//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stw := stopTheWorld(stwWriteHeapDump)

	// Run the collection and dump on the system stack.
	var m MemStats
	systemstack(func() {
		// Snapshot memory statistics first so the MemStats record in
		// the dump describes the heap being written out.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld(stw)
}
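// Illustrative usage sketch (user code, not part of the runtime): this
// entry point is reached through runtime/debug.WriteHeapDump, which takes
// the descriptor of an open file. The file name and error handling are
// assumptions for the example only; the snippet assumes imports of "os",
// "log", and "runtime/debug".
//
//	f, err := os.Create("/tmp/heapdump")
//	if err != nil {
//		log.Fatal(err)
//	}
//	debug.WriteHeapDump(f.Fd())
//	f.Close()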

const (
	fieldKindEol       = 0
	fieldKindPtr       = 1
	fieldKindIface     = 2
	fieldKindEface     = 3
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
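// Note on the stream layout (a summary of the code below, not an added
// format guarantee): each record written by mdump starts with one of the
// tag values above encoded by dumpint, followed by tag-specific fields
// written with dumpint, dumpstr, dumpmemrange, or dumpbv; the dump ends
// with a single tagEOF record.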

var dumpfd uintptr // fd to write the dump to
var tmpbuf []byte  // scratch buffer reused by makeheapobjbv

// Buffer of pending write data, flushed to dumpfd by dwrite and flush.
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr

// dwrite appends len bytes at data to the output buffer, writing the
// buffer (and, for large requests, the data itself) to dumpfd when the
// buffer would overflow.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary.
func dumpint(v uint64) {
	var buf [10]byte
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80)
		n++
		v >>= 7
	}
	buf[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}
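// Worked example of the encoding above (illustrative only): dumpint(300)
// emits the bytes 0xAC 0x02. 300 is 0b1_0010_1100; the low seven bits
// 0101100 (0x2C) are written first with the continuation bit set (0xAC),
// then the remaining bits 10 (0x02) are written with the high bit clear.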

func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

func dumpstr(s string) {
	dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

// dump information for a type.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've seen this type before, no need to dump it a second time.
	// The cache is keyed by the type's hash; each bucket keeps its
	// entries in most-recently-used order.
	b := &typecache[t.Hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.Size_))
	rt := toRType(t)
	if x := t.Uncommon(); x == nil || rt.nameOff(x.PkgPath).Name() == "" {
		dumpstr(rt.string())
	} else {
		pkgpath := rt.nameOff(x.PkgPath).Name()
		name := rt.name()
		dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name))))
		dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath)))
		dwritebyte('.')
		dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
	}
	dumpbool(!t.IsDirectIface() || t.Pointers())
}

// dump an object.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}

func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}

// dump kinds & offsets of interesting fields in bv.
func dumpbv(cbv *bitvector, offset uintptr) {
	for i := uintptr(0); i < uintptr(cbv.n); i++ {
		if cbv.ptrbit(i) == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*goarch.PtrSize))
		}
	}
}

func dumpframe(s *stkframe, child *childInfo) {
	f := s.fn

	// Figure out what we can about our stack map.
	pc := s.pc
	pcdata := int32(-1) // Use the entry map at function entry.
	if pc != f.entry() {
		pc--
		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		bv.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry()))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section.
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section.
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.argBytes()
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return
}

func dumpgoroutine(gp *g) {
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(gp.goid)
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	var u unwinder
	for u.initAt(pc, sp, lr, gp, 0); u.valid(); u.next() {
		dumpframe(&u.frame, &child)
	}

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		fn := *(**funcval)(unsafe.Pointer(&d.fn))
		dumpint(uint64(uintptr(unsafe.Pointer(fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers.
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(eface.data)))
		dumpint(0)
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}

func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func(gp *g) {
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		case _Grunning:
			if gp.syscallsp != 0 {
				dumpgoroutine(gp)
			}
			fallthrough
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead, _Gdeadextra:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	})
}

func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// Dump the data segment.
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// Dump the bss segment.
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// Dump finalizer specials attached to in-use spans.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + spf.special.offset)
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [pageSize / 8]bool

func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << gc.PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		// Mark free objects so they are skipped below.
		for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(uintptr(freeIndex)) {
				freemark[freeIndex] = true
			}
		}

		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}

func dumpparams() {
	dumpint(tagParams)
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(numCPUStartup))
}

func itab_callback(tab *itab) {
	t := tab.Type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}

func dumpitabs() {
	iterate_itabs(itab_callback)
}

func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}

func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the
	// same way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}

func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			// No symbol information: format the frame name as "(0x<pc>)"
			// by writing the hex digits into buf back to front.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry() {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}

func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + spp.special.offset
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

var dumphdr = []byte("go1.7 heap dump\n")

func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}

func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	gp := getg()
	casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(gp.m.curg, _Gwaiting, _Grunning)
}

// dump kinds & offsets of interesting fields in bv.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}

func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / goarch.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys, "heapdump")
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}

	// Set a bit in tmpbuf for each word of the object that holds a pointer.
	clear(tmpbuf[:nptr/8+1])
	s := spanOf(p)
	tp := s.typePointersOf(p, size)
	for {
		var addr uintptr
		if tp, addr = tp.next(p + size); addr == 0 {
			break
		}
		i := (addr - p) / goarch.PtrSize
		tmpbuf[i/8] |= 1 << (i % 8)
	}
	return bitvector{int32(nptr), &tmpbuf[0]}
}