// Package pprof writes runtime profiling data in the format expected
// by the pprof visualization tool.
//
// Profiling a Go program is typically done either through the
// net/http/pprof package, which serves the profiles over HTTP, or by
// calling the functions in this package directly (for example
// StartCPUProfile, StopCPUProfile, and WriteHeapProfile) and then
// inspecting the resulting files with "go tool pprof".
package pprof

import (
	"bufio"
	"cmp"
	"fmt"
	"internal/abi"
	"internal/profilerecord"
	"io"
	"runtime"
	"slices"
	"sort"
	"strings"
	"sync"
	"text/tabwriter"
	"time"
	"unsafe"
)

// A Profile is a collection of stack traces showing the call sequences
// that led to instances of a particular event, such as allocation.
// Packages can create and maintain their own profiles; the most common
// use is for tracking resources that must be explicitly closed, such as
// files or network connections.
//
// A Profile's methods can be called from multiple goroutines simultaneously.
//
// Each Profile has a unique name. A few profiles are predefined:
//
//	goroutine    - stack traces of all current goroutines
//	heap         - a sampling of memory allocations of live objects
//	allocs       - a sampling of all past memory allocations
//	threadcreate - stack traces that led to the creation of new OS threads
//	block        - stack traces that led to blocking on synchronization primitives
//	mutex        - stack traces of holders of contended mutexes
//
// These predefined profiles maintain themselves and panic on an explicit
// Add or Remove call. The CPU profile is not available as a Profile; it
// has a special API, the StartCPUProfile and StopCPUProfile functions,
// because it streams output to a writer during profiling.
type Profile struct {
	name  string
	mu    sync.Mutex
	m     map[any][]uintptr
	count func() int
	write func(io.Writer, int) error
}

// profiles records all registered profiles.
var profiles struct {
	mu sync.Mutex
	m  map[string]*Profile
}

var goroutineProfile = &Profile{
	name:  "goroutine",
	count: countGoroutine,
	write: writeGoroutine,
}

var threadcreateProfile = &Profile{
	name:  "threadcreate",
	count: countThreadCreate,
	write: writeThreadCreate,
}

var heapProfile = &Profile{
	name:  "heap",
	count: countHeap,
	write: writeHeap,
}

var allocsProfile = &Profile{
	name:  "allocs",
	count: countHeap, // identical to heap profile, different default sample type
	write: writeAlloc,
}

var blockProfile = &Profile{
	name:  "block",
	count: countBlock,
	write: writeBlock,
}

var mutexProfile = &Profile{
	name:  "mutex",
	count: countMutex,
	write: writeMutex,
}

func lockProfiles() {
	profiles.mu.Lock()
	if profiles.m == nil {
		// Initial built-in profiles.
		profiles.m = map[string]*Profile{
			"goroutine":    goroutineProfile,
			"threadcreate": threadcreateProfile,
			"heap":         heapProfile,
			"allocs":       allocsProfile,
			"block":        blockProfile,
			"mutex":        mutexProfile,
		}
	}
}

func unlockProfiles() {
	profiles.mu.Unlock()
}

// NewProfile creates a new profile with the given name.
// If a profile with that name already exists, NewProfile panics.
// The convention is to use a 'import/path.' prefix to create
// separate name spaces for each package.
// For compatibility with various tools that read pprof data,
// profile names should not contain spaces.
func NewProfile(name string) *Profile {
	lockProfiles()
	defer unlockProfiles()
	if name == "" {
		panic("pprof: NewProfile with empty name")
	}
	if profiles.m[name] != nil {
		panic("pprof: NewProfile name already in use: " + name)
	}
	p := &Profile{
		name: name,
		m:    map[any][]uintptr{},
	}
	profiles.m[name] = p
	return p
}

// Lookup returns the profile with the given name,
// or nil if no such profile exists.
func Lookup(name string) *Profile {
	lockProfiles()
	defer unlockProfiles()
	return profiles.m[name]
}

// Profiles returns a slice of all the known profiles, sorted by name.
func Profiles() []*Profile {
	lockProfiles()
	defer unlockProfiles()

	all := make([]*Profile, 0, len(profiles.m))
	for _, p := range profiles.m {
		all = append(all, p)
	}

	slices.SortFunc(all, func(a, b *Profile) int {
		return strings.Compare(a.name, b.name)
	})
	return all
}
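
// Hedged usage sketch (not part of this package): a debug command or
// diagnostic endpoint might enumerate every registered profile and print
// its name and current count. The client-side import name "pprof" and the
// use of fmt.Printf are illustrative assumptions.
//
//	for _, p := range pprof.Profiles() {
//		fmt.Printf("%s: %d\n", p.Name(), p.Count())
//	}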

// Name returns this profile's name, which can be passed to Lookup
// to reobtain the profile.
func (p *Profile) Name() string {
	return p.name
}

// Count returns the number of execution stacks currently in the profile.
func (p *Profile) Count() int {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.count != nil {
		return p.count()
	}
	return len(p.m)
}

// Add adds the current execution stack to the profile, associated with value.
// Add stores value in an internal map, so value must be suitable for use as
// a map key and will not be garbage collected until the corresponding
// call to Remove. Add panics if the profile already contains a stack for value.
//
// The skip parameter has the same meaning as runtime.Caller's skip
// and controls where the stack trace begins. Passing skip=0 begins the
// trace in the function calling Add.
func (p *Profile) Add(value any, skip int) {
	if p.name == "" {
		panic("pprof: use of uninitialized Profile")
	}
	if p.write != nil {
		panic("pprof: Add called on built-in Profile " + p.name)
	}

	stk := make([]uintptr, 32)
	n := runtime.Callers(skip+1, stk[:])
	stk = stk[:n]
	if len(stk) == 0 {
		// The value for skip is too large, and there's no stack trace to record.
		stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent)}
	}

	p.mu.Lock()
	defer p.mu.Unlock()
	if p.m[value] != nil {
		panic("pprof: Profile.Add of duplicate value")
	}
	p.m[value] = stk
}

// Remove removes the execution stack associated with value from the profile.
// It is a no-op if the value is not in the profile.
func (p *Profile) Remove(value any) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.m, value)
}
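
// Hedged usage sketch (not part of this package): how a client package could
// use a custom Profile to track resources that must be explicitly closed.
// The names fileProfile, trackedFile, Open, and Close are hypothetical; only
// NewProfile, Add, and Remove come from this package.
//
//	var fileProfile = pprof.NewProfile("example.com/mypkg.files")
//
//	type trackedFile struct{ /* ... */ }
//
//	func Open() *trackedFile {
//		f := &trackedFile{}
//		fileProfile.Add(f, 1) // skip=1: record the stack starting at Open's caller
//		return f
//	}
//
//	func (f *trackedFile) Close() {
//		fileProfile.Remove(f)
//	}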

// WriteTo writes a pprof-formatted snapshot of the profile to w.
// If a write to w returns an error, WriteTo returns that error.
// Otherwise, WriteTo returns nil.
//
// The debug parameter enables additional output.
// Passing debug=0 writes the gzip-compressed protocol buffer described
// in https://github.com/google/pprof/tree/main/proto#overview.
// Passing debug=1 writes the legacy text format with comments
// translating addresses to function names and line numbers, so that a
// programmer can read the profile without tools.
//
// The predefined profiles may assign meaning to other debug values;
// for example, when printing the "goroutine" profile, debug=2 means to
// print the goroutine stacks in the same form that a Go program uses
// when dying due to an unrecovered panic.
func (p *Profile) WriteTo(w io.Writer, debug int) error {
	if p.name == "" {
		panic("pprof: use of zero Profile")
	}
	if p.write != nil {
		return p.write(w, debug)
	}

	// Obtain consistent snapshot under lock; then process without lock.
	p.mu.Lock()
	all := make([][]uintptr, 0, len(p.m))
	for _, stk := range p.m {
		all = append(all, stk)
	}
	p.mu.Unlock()

	// Map order is non-deterministic; make output deterministic.
	slices.SortFunc(all, slices.Compare)

	return printCountProfile(w, debug, p.name, stackProfile(all))
}
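
// Hedged usage sketch (not part of this package): dumping the predefined
// "goroutine" profile in human-readable form, for example from a signal
// handler or a debug endpoint. The use of os.Stderr is an assumption.
//
//	if p := pprof.Lookup("goroutine"); p != nil {
//		p.WriteTo(os.Stderr, 2) // debug=2: full goroutine stacks, panic-style
//	}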

type stackProfile [][]uintptr

func (x stackProfile) Len() int              { return len(x) }
func (x stackProfile) Stack(i int) []uintptr { return x[i] }
func (x stackProfile) Label(i int) *labelMap { return nil }

// A countProfile is a set of stack traces to be printed as counts
// grouped by stack trace. There are multiple implementations:
// all that matters is that we can find out how many traces there are
// and obtain each trace in turn.
type countProfile interface {
	Len() int
	Stack(i int) []uintptr
	Label(i int) *labelMap
}

// expandInlinedFrames copies the call stack from pcs into dst, expanding any
// PCs corresponding to inlined calls into the corresponding PCs for the
// inlined functions. Returns the number of frames copied to dst.
func expandInlinedFrames(dst, pcs []uintptr) int {
	cf := runtime.CallersFrames(pcs)
	var n int
	for n < len(dst) {
		f, more := cf.Next()
		// f.PC is a "call PC", but later consumers will expect
		// "return PCs".
		dst[n] = f.PC + 1
		n++
		if !more {
			break
		}
	}
	return n
}

// printCountCycleProfile outputs block profile records (for block or mutex
// profiles) as pprof-proto format output. Cycle counts are translated to
// time durations because the proto expects count and time (nanoseconds)
// rather than count and number of cycles.
func printCountCycleProfile(w io.Writer, countName, cycleName string, records []profilerecord.BlockProfileRecord) error {
	// Output profile in protobuf form.
	b := newProfileBuilder(w)
	b.pbValueType(tagProfile_PeriodType, countName, "count")
	b.pb.int64Opt(tagProfile_Period, 1)
	b.pbValueType(tagProfile_SampleType, countName, "count")
	b.pbValueType(tagProfile_SampleType, cycleName, "nanoseconds")

	cpuGHz := float64(pprof_cyclesPerSecond()) / 1e9

	values := []int64{0, 0}
	var locs []uint64
	expandedStack := pprof_makeProfStack()
	for _, r := range records {
		values[0] = r.Count
		values[1] = int64(float64(r.Cycles) / cpuGHz)
		// Expand inlined frames first; appendLocsForStack expects
		// return PCs for every physical and inlined frame.
		n := expandInlinedFrames(expandedStack, r.Stack)
		locs = b.appendLocsForStack(locs[:0], expandedStack[:n])
		b.pbSample(values, locs, nil)
	}
	b.build()
	return nil
}

// printCountProfile prints a countProfile at the specified debug level.
// The profile will be in compressed proto format unless debug is nonzero.
func printCountProfile(w io.Writer, debug int, name string, p countProfile) error {
	// Build count of each stack.
	var buf strings.Builder
	key := func(stk []uintptr, lbls *labelMap) string {
		buf.Reset()
		fmt.Fprintf(&buf, "@")
		for _, pc := range stk {
			fmt.Fprintf(&buf, " %#x", pc)
		}
		if lbls != nil {
			buf.WriteString("\n# labels: ")
			buf.WriteString(lbls.String())
		}
		return buf.String()
	}
	count := map[string]int{}
	index := map[string]int{}
	var keys []string
	n := p.Len()
	for i := 0; i < n; i++ {
		k := key(p.Stack(i), p.Label(i))
		if count[k] == 0 {
			index[k] = i
			keys = append(keys, k)
		}
		count[k]++
	}

	sort.Sort(&keysByCount{keys, count})

	if debug > 0 {
		// Print debug profile in legacy format.
		tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		fmt.Fprintf(tw, "%s profile: total %d\n", name, p.Len())
		for _, k := range keys {
			fmt.Fprintf(tw, "%d %s\n", count[k], k)
			printStackRecord(tw, p.Stack(index[k]), false)
		}
		return tw.Flush()
	}

	// Output profile in protobuf form.
	b := newProfileBuilder(w)
	b.pbValueType(tagProfile_PeriodType, name, "count")
	b.pb.int64Opt(tagProfile_Period, 1)
	b.pbValueType(tagProfile_SampleType, name, "count")

	values := []int64{0}
	var locs []uint64
	for _, k := range keys {
		values[0] = int64(count[k])
		// For count profiles, all stack addresses are
		// return PCs, which is what appendLocsForStack expects.
		locs = b.appendLocsForStack(locs[:0], p.Stack(index[k]))
		idx := index[k]
		var labels func()
		if p.Label(idx) != nil {
			labels = func() {
				for k, v := range *p.Label(idx) {
					b.pbLabel(tagSample_Label, k, v, 0)
				}
			}
		}
		b.pbSample(values, locs, labels)
	}
	b.build()
	return nil
}

// keysByCount sorts keys with higher counts first, breaking ties by key string order.
type keysByCount struct {
	keys  []string
	count map[string]int
}

func (x *keysByCount) Len() int      { return len(x.keys) }
func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] }
func (x *keysByCount) Less(i, j int) bool {
	ki, kj := x.keys[i], x.keys[j]
	ci, cj := x.count[ki], x.count[kj]
	if ci != cj {
		return ci > cj
	}
	return ki < kj
}

// printStackRecord prints the function + source line information
// for a single stack trace.
func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
	show := allFrames
	frames := runtime.CallersFrames(stk)
	for {
		frame, more := frames.Next()
		name := frame.Function
		if name == "" {
			show = true
			fmt.Fprintf(w, "#\t%#x\n", frame.PC)
		} else if name != "runtime.goexit" && (show || !strings.HasPrefix(name, "runtime.")) {
			// Hide runtime.goexit and any runtime functions at the beginning.
			// This is useful mainly for allocation traces.
			show = true
			fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, name, frame.PC-frame.Entry, frame.File, frame.Line)
		}
		if !more {
			break
		}
	}
	if !show {
		// We didn't print anything; do it again,
		// and this time include runtime functions.
		printStackRecord(w, stk, true)
		return
	}
	fmt.Fprintf(w, "\n")
}

// WriteHeapProfile is shorthand for Lookup("heap").WriteTo(w, 0).
// It is preserved for backwards compatibility.
func WriteHeapProfile(w io.Writer) error {
	return writeHeap(w, 0)
}
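
// Hedged usage sketch (not part of this package): capturing a heap profile
// to a file at a quiescent point in the program. Running the garbage
// collector first gives up-to-date live-object statistics; the file name is
// an assumption.
//
//	f, err := os.Create("heap.pprof")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	runtime.GC() // get up-to-date statistics
//	if err := pprof.WriteHeapProfile(f); err != nil {
//		log.Fatal(err)
//	}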

// countHeap returns the number of records in the heap profile.
func countHeap() int {
	n, _ := runtime.MemProfile(nil, true)
	return n
}

// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	return writeHeapInternal(w, debug, "")
}

// writeAlloc writes the current runtime heap profile to w
// with the total allocation space as the default sample type.
func writeAlloc(w io.Writer, debug int) error {
	return writeHeapInternal(w, debug, "alloc_space")
}

func writeHeapInternal(w io.Writer, debug int, defaultSampleType string) error {
	var memStats *runtime.MemStats
	if debug != 0 {
		// Read mem stats first, so that our other allocations
		// do not appear in the statistics.
		memStats = new(runtime.MemStats)
		runtime.ReadMemStats(memStats)
	}

	// Find out how many records there are (the call below with nil argument),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between the two calls—so
	// allocate a few extra records for safety and also try again if we're
	// very unlucky. The loop should only execute one iteration in the
	// common case.
	var p []profilerecord.MemProfileRecord
	n, ok := pprof_memProfileInternal(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]profilerecord.MemProfileRecord, n+50)
		n, ok = pprof_memProfileInternal(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	if debug == 0 {
		return writeHeapProto(w, p, int64(runtime.MemProfileRate), defaultSampleType)
	}

	slices.SortFunc(p, func(a, b profilerecord.MemProfileRecord) int {
		return cmp.Compare(a.InUseBytes(), b.InUseBytes())
	})

	b := bufio.NewWriter(w)
	tw := tabwriter.NewWriter(b, 1, 8, 1, '\t', 0)
	w = tw

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	rate := 2 * runtime.MemProfileRate

	// If the in-use totals equal the allocation totals, pprof interprets
	// the legacy header as a two-column (objects, space) profile rather
	// than the four-column (alloc_objects, alloc_space, inuse_objects,
	// inuse_space) form. Nudge allocBytes so the totals stay distinct.
	inUseBytes := total.InUseBytes()
	allocBytes := total.AllocBytes
	if inUseBytes == allocBytes {
		allocBytes++
	}

	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), inUseBytes,
		total.AllocObjects, allocBytes,
		rate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		printStackRecord(w, r.Stack, false)
	}

	// Print memstats information too.
	// pprof will ignore it, but it is useful for people.
	s := memStats
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)

	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
	fmt.Fprintf(w, "# GCSys = %d\n", s.GCSys)
	fmt.Fprintf(w, "# OtherSys = %d\n", s.OtherSys)

	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# LastGC = %d\n", s.LastGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# PauseEnd = %d\n", s.PauseEnd)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# NumForcedGC = %d\n", s.NumForcedGC)
	fmt.Fprintf(w, "# GCCPUFraction = %v\n", s.GCCPUFraction)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	// Also report max RSS obtained from the OS, where supported.
	addMaxRSS(w)

	tw.Flush()
	return b.Flush()
}

// countThreadCreate returns the size of the current ThreadCreateProfile.
func countThreadCreate() int {
	n, _ := runtime.ThreadCreateProfile(nil)
	return n
}

// writeThreadCreate writes the current runtime ThreadCreateProfile to w.
func writeThreadCreate(w io.Writer, debug int) error {
	// ThreadCreateProfile does not provide useful stack traces, so there is
	// no point in tracking labels; wrap the runtime call to drop the
	// labels argument.
	return writeRuntimeProfile(w, debug, "threadcreate", func(p []profilerecord.StackRecord, _ []unsafe.Pointer) (n int, ok bool) {
		return pprof_threadCreateInternal(p)
	})
}

// countGoroutine returns the number of goroutines.
func countGoroutine() int {
	return runtime.NumGoroutine()
}

// writeGoroutine writes the current runtime GoroutineProfile to w.
func writeGoroutine(w io.Writer, debug int) error {
	if debug >= 2 {
		return writeGoroutineStacks(w)
	}
	return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels)
}

func writeGoroutineStacks(w io.Writer) error {
	// We don't know how big the buffer needs to be to collect
	// all the goroutines. Start with 1 MB and try a few times, doubling each time.
	// Give up and use a truncated trace if 64 MB is not enough.
	buf := make([]byte, 1<<20)
	for i := 0; ; i++ {
		n := runtime.Stack(buf, true)
		if n < len(buf) {
			buf = buf[:n]
			break
		}
		if len(buf) >= 64<<20 {
			// Filled 64 MB - stop there.
			break
		}
		buf = make([]byte, 2*len(buf))
	}
	_, err := w.Write(buf)
	return err
}

func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]profilerecord.StackRecord, []unsafe.Pointer) (int, bool)) error {
	// Find out how many records there are (fetch(nil, nil)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between the two calls—so
	// allocate a few extra records for safety and also try again if we're
	// very unlucky. The loop should only execute one iteration in the
	// common case.
	var p []profilerecord.StackRecord
	var labels []unsafe.Pointer
	n, ok := fetch(nil, nil)

	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to fetch.
		p = make([]profilerecord.StackRecord, n+10)
		labels = make([]unsafe.Pointer, n+10)
		n, ok = fetch(p, labels)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	return printCountProfile(w, debug, name, &runtimeProfile{p, labels})
}

type runtimeProfile struct {
	stk    []profilerecord.StackRecord
	labels []unsafe.Pointer
}

func (p *runtimeProfile) Len() int              { return len(p.stk) }
func (p *runtimeProfile) Stack(i int) []uintptr { return p.stk[i].Stack }
func (p *runtimeProfile) Label(i int) *labelMap { return (*labelMap)(p.labels[i]) }

var cpu struct {
	sync.Mutex
	profiling bool
	done      chan bool
}

// StartCPUProfile enables CPU profiling for the current process.
// While profiling, the profile will be buffered and written to w.
// StartCPUProfile returns an error if profiling is already enabled.
//
// On Unix-like systems, StartCPUProfile does not work by default for
// Go code built with -buildmode=c-archive or -buildmode=c-shared.
// StartCPUProfile relies on the SIGPROF signal, but that signal will
// be delivered to the main program's SIGPROF signal handler (if any)
// not to the one used by Go.
func StartCPUProfile(w io.Writer) error {
	// The runtime routines allow a variable profiling rate,
	// but in practice operating systems cannot trigger signals
	// at more than about 500 Hz, and our processing of the
	// signal is not cheap (mostly getting the stack trace).
	// 100 Hz is a reasonable choice: it is frequent enough to
	// produce useful data, rare enough not to bog down the
	// system, and a nice round number to make it easy to
	// convert sample counts to seconds. Instead of requiring
	// each client to specify the frequency, we hard code it.
	const hz = 100

	cpu.Lock()
	defer cpu.Unlock()
	if cpu.done == nil {
		cpu.done = make(chan bool)
	}

	if cpu.profiling {
		return fmt.Errorf("cpu profiling already in use")
	}
	cpu.profiling = true
	runtime.SetCPUProfileRate(hz)
	go profileWriter(w)
	return nil
}

// readProfile, provided by the runtime, returns the next chunk of
// binary CPU profiling stack trace data, blocking until data is available.
// If profiling is turned off and all the profile data accumulated while it
// was on has been returned, readProfile returns eof=true.
// The caller must save the returned data and tags before calling
// readProfile again.
func readProfile() (data []uint64, tags []unsafe.Pointer, eof bool)

func profileWriter(w io.Writer) {
	b := newProfileBuilder(w)
	var err error
	for {
		time.Sleep(100 * time.Millisecond)
		data, tags, eof := readProfile()
		if e := b.addCPUData(data, tags); e != nil && err == nil {
			err = e
		}
		if eof {
			break
		}
	}
	if err != nil {
		// The runtime should never produce an invalid or truncated profile.
		// It drops records that can't fit into its log buffers.
		panic("runtime/pprof: converting profile: " + err.Error())
	}
	b.build()
	cpu.done <- true
}

// StopCPUProfile stops the current CPU profile, if any.
// StopCPUProfile only returns after all the writes for the
// profile have completed.
func StopCPUProfile() {
	cpu.Lock()
	defer cpu.Unlock()

	if !cpu.profiling {
		return
	}
	cpu.profiling = false
	runtime.SetCPUProfileRate(0)
	<-cpu.done
}
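
// Hedged usage sketch (not part of this package): the common
// start-at-main, stop-on-exit CPU profiling pattern. The flag name
// "cpuprofile" is an illustrative assumption.
//
//	var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
//
//	func main() {
//		flag.Parse()
//		if *cpuprofile != "" {
//			f, err := os.Create(*cpuprofile)
//			if err != nil {
//				log.Fatal(err)
//			}
//			defer f.Close()
//			if err := pprof.StartCPUProfile(f); err != nil {
//				log.Fatal(err)
//			}
//			defer pprof.StopCPUProfile()
//		}
//		// ... rest of the program ...
//	}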

// countBlock returns the number of records in the blocking profile.
func countBlock() int {
	n, _ := runtime.BlockProfile(nil)
	return n
}

// countMutex returns the number of records in the mutex profile.
func countMutex() int {
	n, _ := runtime.MutexProfile(nil)
	return n
}

// writeBlock writes the current blocking profile to w.
func writeBlock(w io.Writer, debug int) error {
	return writeProfileInternal(w, debug, "contention", pprof_blockProfileInternal)
}

// writeMutex writes the current mutex profile to w.
func writeMutex(w io.Writer, debug int) error {
	return writeProfileInternal(w, debug, "mutex", pprof_mutexProfileInternal)
}
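
// Hedged usage sketch (not part of this package): the block and mutex
// profiles stay empty unless sampling is enabled in the runtime first.
// The sampling parameters below are illustrative choices.
//
//	runtime.SetBlockProfileRate(1)     // record every blocking event
//	runtime.SetMutexProfileFraction(5) // sample roughly 1 in 5 contention events
//	// ... run the workload ...
//	pprof.Lookup("block").WriteTo(os.Stdout, 1)
//	pprof.Lookup("mutex").WriteTo(os.Stdout, 1)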

// writeProfileInternal writes the current blocking or mutex profile
// depending on the passed parameters.
func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile func([]profilerecord.BlockProfileRecord) (int, bool)) error {
	var p []profilerecord.BlockProfileRecord
	n, ok := runtimeProfile(nil)
	for {
		p = make([]profilerecord.BlockProfileRecord, n+50)
		n, ok = runtimeProfile(p)
		if ok {
			p = p[:n]
			break
		}
	}

	slices.SortFunc(p, func(a, b profilerecord.BlockProfileRecord) int {
		return cmp.Compare(b.Cycles, a.Cycles)
	})

	if debug <= 0 {
		return printCountCycleProfile(w, "contentions", "delay", p)
	}

	b := bufio.NewWriter(w)
	tw := tabwriter.NewWriter(b, 1, 8, 1, '\t', 0) // write through the buffered writer, as writeHeapInternal does
	w = tw

	fmt.Fprintf(w, "--- %v:\n", name)
	fmt.Fprintf(w, "cycles/second=%v\n", pprof_cyclesPerSecond())
	if name == "mutex" {
		fmt.Fprintf(w, "sampling period=%d\n", runtime.SetMutexProfileFraction(-1))
	}
	expandedStack := pprof_makeProfStack()
	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
		n := expandInlinedFrames(expandedStack, r.Stack)
		stack := expandedStack[:n]
		for _, pc := range stack {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprint(w, "\n")
		if debug > 0 {
			printStackRecord(w, stack, true)
		}
	}

	if tw != nil {
		tw.Flush()
	}
	return b.Flush()
}

// The following functions are implemented in the runtime package and
// linked into this package.

func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

func pprof_cyclesPerSecond() int64

func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool)

func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool)

func pprof_fpunwindExpand(dst, src []uintptr) int

func pprof_makeProfStack() []uintptr