76 package pprof
77
78 import (
79 "bufio"
80 "cmp"
81 "fmt"
82 "internal/abi"
83 "internal/profilerecord"
84 "io"
85 "runtime"
86 "slices"
87 "sort"
88 "strings"
89 "sync"
90 "text/tabwriter"
91 "time"
92 "unsafe"
93 )
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
// A Profile is a collection of stack traces recording the call
// sequences that led to instances of a particular event.
//
// Built-in profiles (goroutine, heap, ...) are distinguished by a
// non-nil count/write pair; user-created profiles (see NewProfile)
// instead record entries in m via Add/Remove.
type Profile struct {
	name  string                      // profile name; "" marks the zero Profile
	mu    sync.Mutex                  // guards m
	m     map[any][]uintptr           // value -> recorded call stack (user profiles only)
	count func() int                  // built-in profiles: current event count
	write func(io.Writer, int) error  // built-in profiles: serializer (debug level arg)
}
179
180
// profiles is the registry of all profiles, keyed by name.
// The map is created lazily on first use by lockProfiles.
var profiles struct {
	mu sync.Mutex
	m  map[string]*Profile
}
185
// The built-in profiles. Each pairs a count func (current number of
// events) with a write func (serializer); they are registered under
// their names by lockProfiles.

var goroutineProfile = &Profile{
	name:  "goroutine",
	count: countGoroutine,
	write: writeGoroutine,
}

var threadcreateProfile = &Profile{
	name:  "threadcreate",
	count: countThreadCreate,
	write: writeThreadCreate,
}

// heap and allocs share the same underlying data (countHeap); they
// differ only in the default sample type chosen at write time.
var heapProfile = &Profile{
	name:  "heap",
	count: countHeap,
	write: writeHeap,
}

var allocsProfile = &Profile{
	name:  "allocs",
	count: countHeap,
	write: writeAlloc,
}

var blockProfile = &Profile{
	name:  "block",
	count: countBlock,
	write: writeBlock,
}

var mutexProfile = &Profile{
	name:  "mutex",
	count: countMutex,
	write: writeMutex,
}
221
222 func lockProfiles() {
223 profiles.mu.Lock()
224 if profiles.m == nil {
225
226 profiles.m = map[string]*Profile{
227 "goroutine": goroutineProfile,
228 "threadcreate": threadcreateProfile,
229 "heap": heapProfile,
230 "allocs": allocsProfile,
231 "block": blockProfile,
232 "mutex": mutexProfile,
233 }
234 }
235 }
236
// unlockProfiles releases the registry lock taken by lockProfiles.
func unlockProfiles() {
	profiles.mu.Unlock()
}
240
241
242
243
244
245
246
247 func NewProfile(name string) *Profile {
248 lockProfiles()
249 defer unlockProfiles()
250 if name == "" {
251 panic("pprof: NewProfile with empty name")
252 }
253 if profiles.m[name] != nil {
254 panic("pprof: NewProfile name already in use: " + name)
255 }
256 p := &Profile{
257 name: name,
258 m: map[any][]uintptr{},
259 }
260 profiles.m[name] = p
261 return p
262 }
263
264
265 func Lookup(name string) *Profile {
266 lockProfiles()
267 defer unlockProfiles()
268 return profiles.m[name]
269 }
270
271
272 func Profiles() []*Profile {
273 lockProfiles()
274 defer unlockProfiles()
275
276 all := make([]*Profile, 0, len(profiles.m))
277 for _, p := range profiles.m {
278 all = append(all, p)
279 }
280
281 slices.SortFunc(all, func(a, b *Profile) int {
282 return strings.Compare(a.name, b.name)
283 })
284 return all
285 }
286
287
// Name returns this profile's name, which can be passed to Lookup to
// reobtain the profile.
func (p *Profile) Name() string {
	return p.name
}
291
292
293 func (p *Profile) Count() int {
294 p.mu.Lock()
295 defer p.mu.Unlock()
296 if p.count != nil {
297 return p.count()
298 }
299 return len(p.m)
300 }
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319 func (p *Profile) Add(value any, skip int) {
320 if p.name == "" {
321 panic("pprof: use of uninitialized Profile")
322 }
323 if p.write != nil {
324 panic("pprof: Add called on built-in Profile " + p.name)
325 }
326
327 stk := make([]uintptr, 32)
328 n := runtime.Callers(skip+1, stk[:])
329 stk = stk[:n]
330 if len(stk) == 0 {
331
332 stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent)}
333 }
334
335 p.mu.Lock()
336 defer p.mu.Unlock()
337 if p.m[value] != nil {
338 panic("pprof: Profile.Add of duplicate value")
339 }
340 p.m[value] = stk
341 }
342
343
344
345 func (p *Profile) Remove(value any) {
346 p.mu.Lock()
347 defer p.mu.Unlock()
348 delete(p.m, value)
349 }
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366 func (p *Profile) WriteTo(w io.Writer, debug int) error {
367 if p.name == "" {
368 panic("pprof: use of zero Profile")
369 }
370 if p.write != nil {
371 return p.write(w, debug)
372 }
373
374
375 p.mu.Lock()
376 all := make([][]uintptr, 0, len(p.m))
377 for _, stk := range p.m {
378 all = append(all, stk)
379 }
380 p.mu.Unlock()
381
382
383 slices.SortFunc(all, slices.Compare)
384
385 return printCountProfile(w, debug, p.name, stackProfile(all))
386 }
387
// stackProfile adapts a plain list of call stacks to the countProfile
// interface; it carries no labels.
type stackProfile [][]uintptr

func (x stackProfile) Len() int              { return len(x) }
func (x stackProfile) Stack(i int) []uintptr { return x[i] }
func (x stackProfile) Label(i int) *labelMap { return nil }
393
394
395
396
397
// countProfile is the input consumed by printCountProfile: an indexed
// collection of call stacks with optional per-stack labels.
type countProfile interface {
	Len() int
	Stack(i int) []uintptr
	Label(i int) *labelMap
}
403
404
405
406
// expandInlinedFrames expands the PCs in pcs into dst, one entry per
// (possibly inlined) frame, and returns the number of entries written.
// Expansion stops when dst is full or the frames are exhausted.
func expandInlinedFrames(dst, pcs []uintptr) int {
	frames := runtime.CallersFrames(pcs)
	n := 0
	for ; n < len(dst); n++ {
		f, more := frames.Next()
		// Store f.PC+1 — presumably the return-address form the
		// profile encoders expect (they subtract 1 before
		// symbolizing); confirm against appendLocsForStack.
		dst[n] = f.PC + 1
		if !more {
			n++ // count this final frame; break skips the post statement
			break
		}
	}
	return n
}
422
423
424
425
426
// printCountCycleProfile emits block-style records (which carry a
// count and a cycle total, i.e. block and mutex profiles) to w in
// proto format, with two sample types: countName ("count") and
// cycleName ("nanoseconds").
func printCountCycleProfile(w io.Writer, countName, cycleName string, records []profilerecord.BlockProfileRecord) error {
	// Output profile in protobuf form.
	b := newProfileBuilder(w)
	b.pbValueType(tagProfile_PeriodType, countName, "count")
	b.pb.int64Opt(tagProfile_Period, 1)
	b.pbValueType(tagProfile_SampleType, countName, "count")
	b.pbValueType(tagProfile_SampleType, cycleName, "nanoseconds")

	// Conversion factor from CPU cycles to nanoseconds.
	cpuGHz := float64(pprof_cyclesPerSecond()) / 1e9

	values := []int64{0, 0} // reused per record: [count, nanoseconds]
	var locs []uint64
	expandedStack := pprof_makeProfStack()
	for _, r := range records {
		values[0] = r.Count
		values[1] = int64(float64(r.Cycles) / cpuGHz)
		// Expand inlined frames before encoding the locations.
		n := expandInlinedFrames(expandedStack, r.Stack)
		locs = b.appendLocsForStack(locs[:0], expandedStack[:n])
		b.pbSample(values, locs, nil)
	}
	return b.build()
}
451
452
453
// printCountProfile prints a countProfile at the specified debug level.
// Identical (stack, labels) pairs are merged and counted; output is in
// legacy text format when debug > 0, otherwise in proto format.
func printCountProfile(w io.Writer, debug int, name string, p countProfile) error {
	// Build a canonical string key for each (stack, labels) pair so
	// duplicates can be merged via maps.
	var buf strings.Builder
	key := func(stk []uintptr, lbls *labelMap) string {
		buf.Reset()
		fmt.Fprintf(&buf, "@")
		for _, pc := range stk {
			fmt.Fprintf(&buf, " %#x", pc)
		}
		if lbls != nil {
			buf.WriteString("\n# labels: ")
			buf.WriteString(lbls.String())
		}
		return buf.String()
	}
	count := map[string]int{} // key -> number of occurrences
	index := map[string]int{} // key -> index of first occurrence
	var keys []string         // unique keys, in first-seen order
	n := p.Len()
	for i := 0; i < n; i++ {
		k := key(p.Stack(i), p.Label(i))
		if count[k] == 0 {
			index[k] = i
			keys = append(keys, k)
		}
		count[k]++
	}

	// Most frequent stacks first (ties broken alphabetically).
	sort.Sort(&keysByCount{keys, count})

	if debug > 0 {
		// Print debug profile in legacy text format.
		tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		fmt.Fprintf(tw, "%s profile: total %d\n", name, p.Len())
		for _, k := range keys {
			fmt.Fprintf(tw, "%d %s\n", count[k], k)
			printStackRecord(tw, p.Stack(index[k]), false)
		}
		return tw.Flush()
	}

	// Output profile in protobuf form.
	b := newProfileBuilder(w)
	b.pbValueType(tagProfile_PeriodType, name, "count")
	b.pb.int64Opt(tagProfile_Period, 1)
	b.pbValueType(tagProfile_SampleType, name, "count")

	values := []int64{0} // reused per sample
	var locs []uint64
	for _, k := range keys {
		values[0] = int64(count[k])
		// Encode the representative stack for this key.
		locs = b.appendLocsForStack(locs[:0], p.Stack(index[k]))
		idx := index[k]
		var labels func()
		if p.Label(idx) != nil {
			labels = func() {
				for _, lbl := range p.Label(idx).list {
					b.pbLabel(tagSample_Label, lbl.key, lbl.value, 0)
				}
			}
		}
		b.pbSample(values, locs, labels)
	}
	return b.build()
}
521
522
// keysByCount sorts keys with higher counts first, breaking ties
// alphabetically.
type keysByCount struct {
	keys  []string
	count map[string]int
}

func (x *keysByCount) Len() int      { return len(x.keys) }
func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] }
func (x *keysByCount) Less(i, j int) bool {
	a, b := x.keys[i], x.keys[j]
	if x.count[a] != x.count[b] {
		return x.count[a] > x.count[b]
	}
	return a < b
}
538
539
540
// printStackRecord prints one symbolized stack trace to w, one frame
// per "#"-prefixed line, followed by a blank line. Unless allFrames is
// set, leading runtime frames are hidden; if that would hide every
// frame, the stack is reprinted with all frames shown.
func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
	shown := allFrames
	frames := runtime.CallersFrames(stk)
	for {
		frame, more := frames.Next()
		fn := frame.Function
		switch {
		case fn == "":
			// Unsymbolizable PC: print the raw address.
			shown = true
			fmt.Fprintf(w, "#\t%#x\n", frame.PC)
		case fn == "runtime.goexit":
			// Never print the goexit frame.
		case shown || !(strings.HasPrefix(fn, "runtime.") || strings.HasPrefix(fn, "internal/runtime/")):
			// Once any frame is printed, print everything below it.
			shown = true
			fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, fn, frame.PC-frame.Entry, frame.File, frame.Line)
		}
		if !more {
			break
		}
	}
	if !shown {
		// Every frame was hidden runtime machinery; reprint with
		// all frames so the record is not empty.
		printStackRecord(w, stk, true)
		return
	}
	fmt.Fprintf(w, "\n")
}
568
569
570
571
572
// WriteHeapProfile writes the heap profile to w in proto format
// (debug level 0); it is shorthand for Lookup("heap").WriteTo(w, 0).
func WriteHeapProfile(w io.Writer) error {
	return writeHeap(w, 0)
}
576
577
// countHeap returns the number of records in the heap profile,
// including records with zero in-use objects (inuseZero=true).
func countHeap() int {
	n, _ := runtime.MemProfile(nil, true)
	return n
}
582
583
// writeHeap writes the current runtime heap profile to w, leaving the
// reader's default sample type unset.
func writeHeap(w io.Writer, debug int) error {
	return writeHeapInternal(w, debug, "")
}
587
588
589
// writeAlloc writes the current runtime heap profile to w with the
// default sample type set to "alloc_space" (the allocs profile).
func writeAlloc(w io.Writer, debug int) error {
	return writeHeapInternal(w, debug, "alloc_space")
}
593
// writeHeapInternal writes the current runtime heap profile to w.
// When debug is 0 the proto format is produced; otherwise a legacy
// text format is written, followed by a runtime.MemStats dump.
// defaultSampleType selects the sample type readers display by
// default ("" lets the reader choose).
func writeHeapInternal(w io.Writer, debug int, defaultSampleType string) error {
	var memStats *runtime.MemStats
	if debug != 0 {
		// Read mem stats first, so that this function's own
		// allocations below do not perturb the reported numbers.
		memStats = new(runtime.MemStats)
		runtime.ReadMemStats(memStats)
	}

	// Ask for the record count, then allocate that many plus
	// headroom and retry until the snapshot fits: the profile can
	// grow between the two calls.
	var p []profilerecord.MemProfileRecord
	n, ok := pprof_memProfileInternal(nil, true)
	for {
		// Allocate room for a slightly bigger profile in case more
		// entries were added since the counting call.
		p = make([]profilerecord.MemProfileRecord, n+50)
		n, ok = pprof_memProfileInternal(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; retry with the new count.
	}

	if debug == 0 {
		return writeHeapProto(w, p, int64(runtime.MemProfileRate), defaultSampleType)
	}

	// Legacy text format: order records by in-use bytes.
	slices.SortFunc(p, func(a, b profilerecord.MemProfileRecord) int {
		return cmp.Compare(a.InUseBytes(), b.InUseBytes())
	})

	b := bufio.NewWriter(w)
	tw := tabwriter.NewWriter(b, 1, 8, 1, '\t', 0)
	w = tw

	// Accumulate totals across all records for the header line.
	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// The header reports twice the profiling rate — presumably for
	// compatibility with what legacy pprof readers expect; confirm
	// against upstream history before changing.
	rate := 2 * runtime.MemProfileRate

	// Nudge allocBytes so it never exactly equals inUseBytes —
	// apparently so downstream tools can tell the two quantities
	// apart; NOTE(review): confirm the rationale upstream.
	inUseBytes := total.InUseBytes()
	allocBytes := total.AllocBytes
	if inUseBytes == allocBytes {
		allocBytes++
	}

	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), inUseBytes,
		total.AllocObjects, allocBytes,
		rate)

	// One line per record: in-use and cumulative counts, raw PCs,
	// then the symbolized stack.
	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		printStackRecord(w, r.Stack, false)
	}

	// Trailer: dump the MemStats captured at the top (debug != 0
	// is guaranteed here, so s is non-nil).
	s := memStats
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)

	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
	fmt.Fprintf(w, "# GCSys = %d\n", s.GCSys)
	fmt.Fprintf(w, "# OtherSys = %d\n", s.OtherSys)

	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# LastGC = %d\n", s.LastGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# PauseEnd = %d\n", s.PauseEnd)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# NumForcedGC = %d\n", s.NumForcedGC)
	fmt.Fprintf(w, "# GCCPUFraction = %v\n", s.GCCPUFraction)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	// Also report max RSS where the platform supports it.
	addMaxRSS(w)

	tw.Flush()
	return b.Flush()
}
720
721
// countThreadCreate returns the number of records in the
// thread-creation profile.
func countThreadCreate() int {
	n, _ := runtime.ThreadCreateProfile(nil)
	return n
}
726
727
// writeThreadCreate writes the current thread-creation profile to w.
// This profile carries no labels, so the fetch adapter discards the
// label slot.
func writeThreadCreate(w io.Writer, debug int) error {
	return writeRuntimeProfile(w, debug, "threadcreate", func(p []profilerecord.StackRecord, _ []unsafe.Pointer) (n int, ok bool) {
		return pprof_threadCreateInternal(p)
	})
}
736
737
// countGoroutine returns the number of goroutines currently running.
func countGoroutine() int {
	return runtime.NumGoroutine()
}
741
742
743 func writeGoroutine(w io.Writer, debug int) error {
744 if debug >= 2 {
745 return writeGoroutineStacks(w)
746 }
747 return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels)
748 }
749
// writeGoroutineStacks writes the runtime.Stack dump of all goroutines
// to w, growing the buffer until the dump fits (up to 64 MB).
func writeGoroutineStacks(w io.Writer) error {
	// Start with a 1 MB buffer and double until runtime.Stack
	// reports it did not fill the whole thing.
	buf := make([]byte, 1<<20)
	for {
		n := runtime.Stack(buf, true)
		if n < len(buf) {
			buf = buf[:n]
			break
		}
		if len(buf) >= 64<<20 {
			// Cap at 64 MB; emit the (truncated) dump as-is.
			break
		}
		buf = make([]byte, 2*len(buf))
	}
	_, err := w.Write(buf)
	return err
}
770
771 func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]profilerecord.StackRecord, []unsafe.Pointer) (int, bool)) error {
772
773
774
775
776
777
778 var p []profilerecord.StackRecord
779 var labels []unsafe.Pointer
780 n, ok := fetch(nil, nil)
781
782 for {
783
784
785
786 p = make([]profilerecord.StackRecord, n+10)
787 labels = make([]unsafe.Pointer, n+10)
788 n, ok = fetch(p, labels)
789 if ok {
790 p = p[0:n]
791 break
792 }
793
794 }
795
796 return printCountProfile(w, debug, name, &runtimeProfile{p, labels})
797 }
798
// runtimeProfile adapts parallel record/label slices fetched from the
// runtime to the countProfile interface. labels[i] belongs to stk[i].
type runtimeProfile struct {
	stk    []profilerecord.StackRecord
	labels []unsafe.Pointer
}

func (p *runtimeProfile) Len() int              { return len(p.stk) }
func (p *runtimeProfile) Stack(i int) []uintptr { return p.stk[i].Stack }
func (p *runtimeProfile) Label(i int) *labelMap { return (*labelMap)(p.labels[i]) }
807
// cpu holds the CPU profiler's state: whether profiling is active and
// the channel used to wait for the writer goroutine to finish.
var cpu struct {
	sync.Mutex
	profiling bool
	done      chan bool
}
813
814
815
816
817
818
819
820
821
822
823
824
825 func StartCPUProfile(w io.Writer) error {
826
827
828
829
830
831
832
833
834
835 const hz = 100
836
837 cpu.Lock()
838 defer cpu.Unlock()
839 if cpu.done == nil {
840 cpu.done = make(chan bool)
841 }
842
843 if cpu.profiling {
844 return fmt.Errorf("cpu profiling already in use")
845 }
846 cpu.profiling = true
847 runtime.SetCPUProfileRate(hz)
848 go profileWriter(w)
849 return nil
850 }
851
852
853
854
855
856
// readProfile returns the next chunk of binary CPU profile data and
// associated tags, reporting eof when the profiler has been stopped
// and drained. It has no Go body — presumably provided by the runtime
// via go:linkname (the directive is not visible in this chunk).
func readProfile() (data []uint64, tags []unsafe.Pointer, eof bool)
858
// profileWriter runs in its own goroutine (started by StartCPUProfile)
// and drains the runtime's CPU profile data into w in proto form.
// It signals cpu.done once readProfile reports eof, which happens
// after StopCPUProfile sets the profiling rate to 0.
func profileWriter(w io.Writer) {
	b := newProfileBuilder(w)
	var err error
	for {
		// Pause between reads so profile data accumulates in batches.
		time.Sleep(100 * time.Millisecond)
		data, tags, eof := readProfile()
		if e := b.addCPUData(data, tags); e != nil && err == nil {
			err = e // keep only the first error; keep draining until eof
		}
		if eof {
			break
		}
	}
	if err != nil {
		// A conversion error indicates corrupt data from the runtime,
		// which should never happen — treat it as a fatal bug.
		panic("runtime/pprof: converting profile: " + err.Error())
	}
	b.build()
	cpu.done <- true // unblock StopCPUProfile
}
880
881
882
883
884 func StopCPUProfile() {
885 cpu.Lock()
886 defer cpu.Unlock()
887
888 if !cpu.profiling {
889 return
890 }
891 cpu.profiling = false
892 runtime.SetCPUProfileRate(0)
893 <-cpu.done
894 }
895
896
// countBlock returns the number of records in the blocking profile.
func countBlock() int {
	n, _ := runtime.BlockProfile(nil)
	return n
}
901
902
// countMutex returns the number of records in the mutex profile.
func countMutex() int {
	n, _ := runtime.MutexProfile(nil)
	return n
}
907
908
// writeBlock writes the current blocking profile to w.
func writeBlock(w io.Writer, debug int) error {
	return writeProfileInternal(w, debug, "contention", pprof_blockProfileInternal)
}
912
913
// writeMutex writes the current mutex profile to w.
func writeMutex(w io.Writer, debug int) error {
	return writeProfileInternal(w, debug, "mutex", pprof_mutexProfileInternal)
}
917
918
919 func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile func([]profilerecord.BlockProfileRecord) (int, bool)) error {
920 var p []profilerecord.BlockProfileRecord
921 n, ok := runtimeProfile(nil)
922 for {
923 p = make([]profilerecord.BlockProfileRecord, n+50)
924 n, ok = runtimeProfile(p)
925 if ok {
926 p = p[:n]
927 break
928 }
929 }
930
931 slices.SortFunc(p, func(a, b profilerecord.BlockProfileRecord) int {
932 return cmp.Compare(b.Cycles, a.Cycles)
933 })
934
935 if debug <= 0 {
936 return printCountCycleProfile(w, "contentions", "delay", p)
937 }
938
939 b := bufio.NewWriter(w)
940 tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
941 w = tw
942
943 fmt.Fprintf(w, "--- %v:\n", name)
944 fmt.Fprintf(w, "cycles/second=%v\n", pprof_cyclesPerSecond())
945 if name == "mutex" {
946 fmt.Fprintf(w, "sampling period=%d\n", runtime.SetMutexProfileFraction(-1))
947 }
948 expandedStack := pprof_makeProfStack()
949 for i := range p {
950 r := &p[i]
951 fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
952 n := expandInlinedFrames(expandedStack, r.Stack)
953 stack := expandedStack[:n]
954 for _, pc := range stack {
955 fmt.Fprintf(w, " %#x", pc)
956 }
957 fmt.Fprint(w, "\n")
958 if debug > 0 {
959 printStackRecord(w, stack, true)
960 }
961 }
962
963 if tw != nil {
964 tw.Flush()
965 }
966 return b.Flush()
967 }
968
969
// The following functions have no Go bodies here — presumably they are
// implemented in the runtime package and linked via go:linkname (the
// directives are not visible in this chunk).

// pprof_goroutineProfileWithLabels fetches the goroutine profile with
// per-goroutine label pointers.
func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

// pprof_cyclesPerSecond reports the CPU cycle rate used to convert
// cycle counts to time.
func pprof_cyclesPerSecond() int64

// pprof_memProfileInternal fetches heap profile records; inuseZero
// includes records with zero in-use objects.
func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool)

// pprof_blockProfileInternal fetches blocking profile records.
func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

// pprof_mutexProfileInternal fetches mutex profile records.
func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

// pprof_threadCreateInternal fetches thread-creation profile records.
func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool)

// pprof_fpunwindExpand expands frame-pointer-unwound PCs from src
// into dst, returning the number written.
func pprof_fpunwindExpand(dst, src []uintptr) int

// pprof_makeProfStack returns a scratch stack buffer sized for
// profile expansion.
func pprof_makeProfStack() []uintptr
992