1
2
3
4
5
6
7 package pprof
8
import (
	"bytes"
	"context"
	"fmt"
	"internal/abi"
	"internal/profile"
	"internal/syscall/unix"
	"internal/testenv"
	"io"
	"iter"
	"math"
	"math/big"
	"os"
	"regexp"
	"runtime"
	"runtime/debug"
	"slices"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe"
)
33
// cpuHogger repeatedly applies f to an accumulator seeded from *y for at
// least 500 iterations and at least dur of wall time, then stores the
// result back through y so the compiler cannot discard the work.
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	// Run for a minimum iteration count as well as a minimum duration so
	// the profiler has a chance to attribute samples to this call tree.
	start := time.Now()
	acc := *y
	for n := 0; ; n++ {
		if n >= 500 && time.Since(start) >= dur {
			break
		}
		acc = f(acc)
	}
	*y = acc
}
47
// salt1 and salt2 accumulate the outputs of the CPU hog functions across
// test runs; threading the result through package state keeps the hot
// loops from being optimized away.
var (
	salt1 = 0
	salt2 = 0
)
52
53
54
55
// cpuHog1 burns CPU for a fixed 1e5 iterations via cpuHog0. The wrapper
// exists so profiles attribute samples to a distinct function name.
func cpuHog1(x int) int {
	return cpuHog0(x, 1e5)
}
59
// cpuHog0 burns CPU by repeatedly squaring an accumulator n times.
// The non-positive branch keeps the loop body data-dependent so the
// compiler cannot eliminate it.
func cpuHog0(x, n int) int {
	acc := x
	for iters := n; iters > 0; iters-- {
		switch {
		case acc > 0:
			acc *= acc
		default:
			acc *= acc + 1
		}
	}
	return acc
}
71
// cpuHog2 burns CPU for 1e5 iterations. It deliberately does not share a
// body with cpuHog0/cpuHog1 so profiles can distinguish the two hogs.
func cpuHog2(x int) int {
	acc := x
	for i := 0; i < 1e5; i++ {
		if acc <= 0 {
			acc *= acc + 2
		} else {
			acc *= acc
		}
	}
	return acc
}
83
84
85
86 func avoidFunctions() []string {
87 if runtime.Compiler == "gccgo" {
88 return []string{"runtime.sigprof"}
89 }
90 return nil
91 }
92
// TestCPUProfile checks that samples from a single cpuHog1 worker show up
// in the CPU profile with the expected stack.
func TestCPUProfile(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}
99
// TestCPUProfileMultithreaded runs two different CPU hogs on two
// goroutines (with GOMAXPROCS=2) and expects both to appear in the
// profile.
func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, &salt1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, &salt2, dur)
		<-c
	})
}
113
// TestCPUProfileMultithreadMagnitude checks that the total CPU time
// reported by the profile is close to the process's OS-measured
// user+system CPU time, for both serial and parallel workloads.
// See https://golang.org/issue/35057.
func TestCPUProfileMultithreadMagnitude(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("issue 35057 is only confirmed on Linux")
	}

	// Log the kernel version: a failure here can be caused by a known
	// Linux kernel bug rather than by Go itself.
	major, minor := unix.KernelVersion()
	t.Logf("Running on Linux %d.%d", major, minor)
	defer func() {
		if t.Failed() {
			t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
		}
	}()

	// Skip on builders running kernels in [5.9, 5.16), where the bug is
	// known to fire on 386/amd64 (see issue 49065).
	if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
		have59 := major > 5 || (major == 5 && minor >= 9)
		have516 := major > 5 || (major == 5 && minor >= 16)
		if have59 && !have516 {
			testenv.SkipFlaky(t, 49065)
		}
	}

	// Permitted fractional difference between the OS CPU accounting and
	// the profile's total; short mode runs less work so gets a looser
	// bound.
	maxDiff := 0.10
	if testing.Short() {
		maxDiff = 0.40
	}

	// compare returns an error when a and b differ by more than maxDiff
	// as a fraction of the larger value; both must be positive.
	compare := func(a, b time.Duration, maxDiff float64) error {
		if a <= 0 || b <= 0 {
			return fmt.Errorf("Expected both time reports to be positive")
		}

		if a < b {
			a, b = b, a
		}

		diff := float64(a-b) / float64(a)
		if diff > maxDiff {
			return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
		}

		return nil
	}

	for _, tc := range []struct {
		name    string
		workers int
	}{
		{
			name:    "serial",
			workers: 1,
		},
		{
			name:    "parallel",
			workers: runtime.GOMAXPROCS(0),
		},
	} {
		// check that the OS's perspective of CPU usage matches the profile.
		t.Run(tc.name, func(t *testing.T) {
			t.Logf("Running with %d workers", tc.workers)

			var userTime, systemTime time.Duration
			matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
			acceptProfile := func(t *testing.T, p *profile.Profile) bool {
				if !matches(t, p) {
					return false
				}

				ok := true
				// The CPU profile's sample types must be samples/count
				// followed by cpu/nanoseconds.
				for i, unit := range []string{"count", "nanoseconds"} {
					if have, want := p.SampleType[i].Unit, unit; have != want {
						t.Logf("pN SampleType[%d]; %q != %q", i, have, want)
						ok = false
					}
				}

				// Sum the nanoseconds value across all samples and compare
				// it against the CPU time the OS charged to the process
				// while the workload ran.
				var value time.Duration
				for _, sample := range p.Sample {
					value += time.Duration(sample.Value[1]) * time.Nanosecond
				}

				totalTime := userTime + systemTime
				t.Logf("compare %s user + %s system = %s vs %s", userTime, systemTime, totalTime, value)
				if err := compare(totalTime, value, maxDiff); err != nil {
					t.Logf("compare got %v want nil", err)
					ok = false
				}

				return ok
			}

			testCPUProfile(t, acceptProfile, func(dur time.Duration) {
				userTime, systemTime = diffCPUTime(t, func() {
					var wg sync.WaitGroup
					var once sync.Once
					for i := 0; i < tc.workers; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							var salt = 0
							cpuHogger(cpuHog1, &salt, dur)
							// Publish one worker's result so the work is
							// observably used.
							once.Do(func() { salt1 = salt })
						}()
					}
					wg.Wait()
				})
			})
		})
	}
}
244
245
246
// containsInlinedCall reports whether the function body for the function f
// is known to contain an inlined function call within the first maxBytes
// bytes.
func containsInlinedCall(f any, maxBytes int) bool {
	_, found := findInlinedCall(f, maxBytes)
	return found
}
251
252
253
// findInlinedCall scans up to maxBytes of f's machine code for a PC whose
// resolved symbol name differs from f's while sharing f's entry point —
// i.e. a call that was inlined into f — and returns that PC.
func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
	fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
	if fFunc == nil || fFunc.Entry() == 0 {
		panic("failed to locate function entry")
	}

	for offset := 0; offset < maxBytes; offset++ {
		innerPC := fFunc.Entry() + uintptr(offset)
		inner := runtime.FuncForPC(innerPC)
		if inner == nil {
			// No function known for this PC value; it may simply be
			// mid-instruction, so keep scanning.
			continue
		}
		if inner.Entry() != fFunc.Entry() {
			// Scanned past the end of f without finding an inlined call.
			break
		}
		if inner.Name() != fFunc.Name() {
			// This PC has f as its entry-point, but is not f. Therefore,
			// it must be a function inlined into f.
			return uint64(innerPC), true
		}
	}

	return 0, false
}
281
// TestCPUProfileInlining checks that inlined frames are encoded correctly:
// inlinedCallee must always appear in the same profile Location as its
// inliner inlinedCaller, never in a separate entry.
func TestCPUProfileInlining(t *testing.T) {
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}

	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})

	// Check that inlined function locations are encoded correctly:
	// inlinedCallee and inlinedCaller should share one Location.
	for _, loc := range p.Location {
		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
		for _, line := range loc.Line {
			if line.Function.Name == "runtime/pprof.inlinedCallee" {
				hasInlinedCallee = true
			}
			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
				hasInlinedCallerAfterInlinedCallee = true
			}
		}
		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
		}
	}
}
308
// inlinedCaller is the host function into which inlinedCallee is expected
// to be inlined by the compiler.
func inlinedCaller(x int) int {
	x = inlinedCallee(x, 1e5)
	return x
}
313
// inlinedCallee is a small wrapper around cpuHog0, kept tiny so the
// compiler inlines it into its callers.
func inlinedCallee(x, n int) int {
	return cpuHog0(x, n)
}
317
318
// dumpCallers records the current call stack into pcs when pcs is
// non-nil, skipping the frames for runtime.Callers and dumpCallers
// itself.
func dumpCallers(pcs []uintptr) {
	if pcs == nil {
		return
	}

	skip := 2 // Callers and dumpCallers
	runtime.Callers(skip, pcs)
}
327
328
// inlinedCallerDump captures a stack in which inlinedCalleeDump is
// expected to appear as an inlined frame.
// NOTE(review): upstream may mark this //go:noinline (a directive could
// have been stripped with the comments) — confirm before relying on its
// own frame appearing.
func inlinedCallerDump(pcs []uintptr) {
	inlinedCalleeDump(pcs)
}
332
// inlinedCalleeDump records the current stack; tests expect this call to
// be inlined into inlinedCallerDump.
func inlinedCalleeDump(pcs []uintptr) {
	dumpCallers(pcs)
}
336
// inlineWrapperInterface hides the concrete dump implementation behind an
// interface so tests can exercise stacks that go through an interface
// method call.
type inlineWrapperInterface interface {
	dump(stack []uintptr)
}
340
// inlineWrapper is the concrete implementation of inlineWrapperInterface
// used by inlinedWrapperCallerDump.
type inlineWrapper struct {
}

// dump records the current stack via dumpCallers.
func (h inlineWrapper) dump(pcs []uintptr) {
	dumpCallers(pcs)
}
347
// inlinedWrapperCallerDump records the current stack through an interface
// method call. The separate declare-then-assign is kept as written —
// presumably to shape how the compiler devirtualizes/inlines the call;
// do not merge the two statements without confirming.
func inlinedWrapperCallerDump(pcs []uintptr) {
	var h inlineWrapperInterface
	h = &inlineWrapper{}
	h.dump(pcs)
}
353
// TestCPUProfileRecursion checks that recursive functions are not
// duplicated within a single profile Location: each Location may mention
// recursionCaller or recursionCallee at most once.
func TestCPUProfileRecursion(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(recursionCaller, &salt1, dur)
	})

	// Check that the Location encoding was not confused by recursion.
	for i, loc := range p.Location {
		recursionFunc := 0
		for _, line := range loc.Line {
			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
				recursionFunc++
			}
		}
		if recursionFunc > 1 {
			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
		}
	}
}
373
// recursionCaller kicks off a fixed-depth recursion via recursionCallee.
func recursionCaller(x int) int {
	y := recursionCallee(3, x)
	return y
}
378
// recursionCallee recurses n times, burning CPU via inlinedCallee at each
// level so samples land in recursive frames.
func recursionCallee(n, x int) int {
	if n == 0 {
		return 1
	}
	y := inlinedCallee(x, 1e4)
	return y * recursionCallee(n-1, x)
}
386
// recursionChainTop, recursionChainMiddle, and recursionChainBottom form a
// mutually recursive call chain; the bottom records the current stack into
// pcs on every pass.
func recursionChainTop(x int, pcs []uintptr) {
	if x < 0 {
		return
	}
	recursionChainMiddle(x, pcs)
}

func recursionChainMiddle(x int, pcs []uintptr) {
	recursionChainBottom(x, pcs)
}

func recursionChainBottom(x int, pcs []uintptr) {
	// This will be called each time; only the last capture matters. The
	// call is unconditional — presumably so this function stays eligible
	// for inlining (confirm against the tests that use it).
	dumpCallers(pcs)

	recursionChainTop(x-1, pcs)
}
405
406 func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
407 p, err := profile.Parse(bytes.NewReader(valBytes))
408 if err != nil {
409 t.Fatal(err)
410 }
411 for _, sample := range p.Sample {
412 count := uintptr(sample.Value[0])
413 f(count, sample.Location, sample.Label)
414 }
415 return p
416 }
417
418 func cpuProfilingBroken() bool {
419 switch runtime.GOOS {
420 case "plan9":
421
422 return true
423 case "aix":
424
425 return true
426 case "ios", "dragonfly", "netbsd", "illumos", "solaris":
427
428 return true
429 case "openbsd":
430 if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
431
432 return true
433 }
434 }
435
436 return false
437 }
438
439
440
// testCPUProfile runs f under the CPU profiler and validates the resulting
// profile with matches (if non-nil). Because profiling is flaky on loaded
// systems, it retries with doubled durations until either the profile is
// acceptable (returning it) or the deadline approaches; on platforms with
// known-broken profiling it skips instead of failing.
func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
	switch runtime.GOOS {
	case "darwin":
		// Log the OS version, which has mattered for profiling behavior.
		out, err := testenv.Command(t, "uname", "-a").CombinedOutput()
		if err != nil {
			t.Fatal(err)
		}
		vers := string(out)
		t.Logf("uname -a: %v", vers)
	case "plan9":
		t.Skip("skipping on plan9")
	case "wasip1":
		t.Skip("skipping on wasip1")
	}

	broken := cpuProfilingBroken()

	deadline, ok := t.Deadline()
	if broken || !ok {
		if broken && testing.Short() {
			// If profiling is expected to be broken, don't wait around.
			deadline = time.Now().Add(1 * time.Second)
		} else {
			deadline = time.Now().Add(10 * time.Second)
		}
	}

	// Pick an initial profiling duration; short mode trades confidence
	// for speed.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 100 * time.Millisecond
	}

	// Profiling tests are inherently flaky on loaded systems, so retry
	// with a doubled duration each time while the deadline allows.
	for {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if p, ok := profileOk(t, matches, prof, duration); ok {
			return p
		}

		duration *= 2
		if time.Until(deadline) < duration {
			break
		}
		t.Logf("retrying with %s duration", duration)
	}

	if broken {
		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
	}

	// Ignore the failure if the tests are running in a QEMU-based
	// emulator: QEMU is not perfect at emulating everything. The IN_QEMU
	// environment variable is set by some of the Go builders.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
	return nil
}
513
// diffCPUTimeImpl, when non-nil, measures the user and system CPU time
// consumed while running a function; it is installed elsewhere (per-OS
// build files — not visible in this chunk).
var diffCPUTimeImpl func(f func()) (user, system time.Duration)

// diffCPUTime returns the user and system CPU time spent while running f,
// failing the test on platforms with no implementation installed.
func diffCPUTime(t *testing.T, f func()) (user, system time.Duration) {
	if fn := diffCPUTimeImpl; fn != nil {
		return fn(f)
	}
	t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
	return 0, 0
}
523
// contains reports whether slice includes the exact string s.
func contains(slice []string, s string) bool {
	// Idiomatic replacement for the hand-rolled linear scan; the file
	// already targets a Go version with the slices package.
	return slices.Contains(slice, s)
}
532
533
// stackContains matches if a function whose name contains spec appears
// anywhere in the stack trace; count and labels are ignored (the signature
// matches sampleMatchFunc).
func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	for _, loc := range stk {
		for _, line := range loc.Line {
			if strings.Contains(line.Function.Name, spec) {
				return true
			}
		}
	}
	return false
}
544
// sampleMatchFunc reports whether a single profile sample (count, stack,
// labels) satisfies the given spec string.
type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
546
// profileOk parses prof, logs every sample, and reports whether the
// profile looks acceptable: enough samples for the run duration and, when
// matches is non-nil, the expected stacks present. The parsed profile is
// always returned.
func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
	ok = true

	var samples uintptr
	var buf strings.Builder
	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		fmt.Fprintf(&buf, " labels: %v\n", labels)
		samples += count
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with too few samples due to
		// coarse timer resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return p, false
	}

	// Check that we got a reasonable number of samples. The ideal count is
	// 100 Hz times the duration; accept either a quarter of that or at
	// least 10 samples, since a loaded system cannot guarantee the ideal.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if matches != nil && !matches(t, p) {
		ok = false
	}

	return p, ok
}
585
// profileMatchFunc reports whether a parsed profile meets a test's
// expectations, logging details through t.
type profileMatchFunc func(*testing.T, *profile.Profile) bool
587
// matchAndAvoidStacks returns a profileMatchFunc that checks, using the
// sample matcher matches, that every function in need received at least
// one sample and that no samples landed in any function in avoid.
func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
	return func(t *testing.T, p *profile.Profile) (ok bool) {
		ok = true

		// Tally samples matching each 'need' spec and each 'avoid' name.
		have := make([]uintptr, len(need))
		avoidSamples := make([]uintptr, len(avoid))

		for _, sample := range p.Sample {
			count := uintptr(sample.Value[0])
			for i, spec := range need {
				if matches(spec, count, sample.Location, sample.Label) {
					have[i] += count
				}
			}
			for i, name := range avoid {
				for _, loc := range sample.Location {
					for _, line := range loc.Line {
						if strings.Contains(line.Function.Name, name) {
							avoidSamples[i] += count
						}
					}
				}
			}
		}

		for i, name := range avoid {
			bad := avoidSamples[i]
			if bad != 0 {
				t.Logf("found %d samples in avoid-function %s\n", bad, name)
				ok = false
			}
		}

		if len(need) == 0 {
			return
		}

		var total uintptr
		for i, name := range need {
			total += have[i]
			t.Logf("found %d samples in expected function %s\n", have[i], name)
		}
		if total == 0 {
			t.Logf("no samples in expected functions")
			ok = false
		}

		// We'd like to require a reasonable minimum per function, like
		// total/len(have)/smallconstant, but profiling is too flaky for
		// that (see issue 7095); just require at least one sample each.
		min := uintptr(1)
		for i, name := range need {
			if have[i] < min {
				t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
				ok = false
			}
		}
		return
	}
}
651
652
653
// TestCPUProfileWithFork exercises profiling while the process repeatedly
// execs a child: the profile machinery must survive fork/exec. A large,
// touched heap makes each fork slower and thus more likely to overlap
// profiling signals.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	exe, err := os.Executable()
	if err != nil {
		t.Fatal(err)
	}

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use a smaller size for Android to avoid crashing.
		heap = 100 << 20
	}
	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
		// Use a smaller size for Windows/ARM to avoid crashing.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// Allocating and touching the heap makes fork slower.
	garbage := make([]byte, heap)
	done := make(chan bool)
	go func() {
		// Touch every byte so the memory is actually paged in.
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	// Exec the test binary repeatedly while profiling is active; the
	// child's output and exit status are deliberately ignored.
	for i := 0; i < 10; i++ {
		testenv.Command(t, exe, "-h").CombinedOutput()
	}
}
696
697
698
699
// TestGoroutineSwitch tests that profiling during goroutine switches does
// not produce bogus stack frames from the old goroutine's state (see
// golang.org/issue/8425): no sample should have "gogo" as its innermost
// frame with a traceback attached.
func TestGoroutineSwitch(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("not applicable for gccgo")
	}

	// How many rounds of Gosched-heavy profiling to attempt; short mode
	// does a single round.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read the profile looking for entries attributed to gogo with an
		// attempted traceback.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry of two frames whose top frame is one of the
			// synthetic 'System'/'ExternalCode'/'GC' markers records a PC
			// without a traceback; those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// A single-frame entry is OK too: the unwinder knew to stop
			// there.
			if len(stk) == 1 {
				return
			}

			// Otherwise we must not see gogo as the innermost frame of a
			// multi-frame entry.
			name := stk[0].Line[0].Function.Name
			if name == "gogo" {
				var buf strings.Builder
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for gogo:\n%s", buf.String())
			}
		})
	}
}
753
754 func fprintStack(w io.Writer, stk []*profile.Location) {
755 if len(stk) == 0 {
756 fmt.Fprintf(w, " (stack empty)")
757 }
758 for _, loc := range stk {
759 fmt.Fprintf(w, " %#x", loc.Address)
760 fmt.Fprintf(w, " (")
761 for i, line := range loc.Line {
762 if i > 0 {
763 fmt.Fprintf(w, " ")
764 }
765 fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
766 }
767 fmt.Fprintf(w, ")")
768 }
769 }
770
771
772 func TestMathBigDivide(t *testing.T) {
773 testCPUProfile(t, nil, func(duration time.Duration) {
774 t := time.After(duration)
775 pi := new(big.Int)
776 for {
777 for i := 0; i < 100; i++ {
778 n := big.NewInt(2646693125139304345)
779 d := big.NewInt(842468587426513207)
780 pi.Div(n, d)
781 }
782 select {
783 case <-t:
784 return
785 default:
786 }
787 }
788 })
789 }
790
791
792 func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
793 for _, f := range strings.Split(spec, ",") {
794 if !stackContains(f, count, stk, labels) {
795 return false
796 }
797 }
798 return true
799 }
800
// TestMorestack checks that stack-growth work (runtime.newstack) is
// attributed to the goroutine whose growstack call caused it: samples must
// contain both runtime.newstack and runtime/pprof.growstack.
func TestMorestack(t *testing.T) {
	matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
	testCPUProfile(t, matches, func(duration time.Duration) {
		t := time.After(duration) // shadows the *testing.T; only the timer is needed here
		c := make(chan bool)
		for {
			// Each new goroutine starts with a small stack and must grow
			// it to run growstack1.
			go func() {
				growstack1()
				c <- true
			}()
			select {
			case <-t:
				return
			case <-c:
			}
		}
	})
}
819
820
// growstack1 starts a 10-deep growstack recursion.
// NOTE(review): upstream likely marks this //go:noinline (directives may
// have been stripped along with comments) — confirm before relying on a
// growstack1 frame appearing in profiles.
func growstack1() {
	growstack(10)
}
824
825
// growstack recurses n+1 times with a 512 KiB stack frame per level,
// forcing repeated stack growth (runtime.newstack) for TestMorestack.
func growstack(n int) {
	var buf [8 << 18]byte
	use(buf)
	if n > 0 {
		growstack(n - 1)
	}
}
833
834
// use takes its huge argument by value so the compiler cannot eliminate
// growstack's large stack frame.
func use(x [8 << 18]byte) {}
836
// TestBlockProfile generates one blocking event of each kind with the
// block profile rate set to 1, then checks both the debug=1 text output
// (against per-kind regexps; tabs in the patterns are widened to \t+ when
// matching) and the proto output (against expected stack prefixes).
func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func(*testing.T)
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
	[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
	[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
	[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
	[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
	[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
	[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
	[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate the block profile: rate 1 records every blocking event.
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f(t)
	}

	t.Run("debug=1", func(t *testing.T) {
		var w strings.Builder
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		for _, test := range tests {
			// Widen each tab so the expected patterns tolerate the
			// writer's column alignment.
			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := profileStacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})

}
994
995 func profileStacks(p *profile.Profile) (res [][]string) {
996 for _, s := range p.Sample {
997 var stk []string
998 for _, l := range s.Location {
999 for _, line := range l.Line {
1000 stk = append(stk, line.Function.Name)
1001 }
1002 }
1003 res = append(res, stk)
1004 }
1005 return res
1006 }
1007
1008 func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) {
1009 for _, record := range records {
1010 frames := runtime.CallersFrames(record.Stack())
1011 var stk []string
1012 for {
1013 frame, more := frames.Next()
1014 stk = append(stk, frame.Function)
1015 if !more {
1016 break
1017 }
1018 }
1019 res = append(res, stk)
1020 }
1021 return res
1022 }
1023
// containsStack reports whether any stack in got begins with the exact
// sequence of function names in want. An empty want never matches,
// mirroring the original loop's behavior.
func containsStack(got [][]string, want []string) bool {
	for _, stk := range got {
		if len(stk) < len(want) {
			continue
		}
		matched := len(want) > 0
		for i := range want {
			if stk[i] != want[i] {
				matched = false
				break
			}
		}
		if matched {
			return true
		}
	}
	return false
}
1040
1041
1042
1043
// awaitBlockedGoroutine spins, polling runtime.Stack output, until at
// least count goroutines are blocked in the given wait state inside the
// runtime/pprof function fName. If the test deadline is near it dumps all
// stacks and panics so a hang is diagnosable.
func awaitBlockedGoroutine(t *testing.T, state, fName string, count int) {
	// Matches a goroutine header in the requested state whose stack ends
	// up in runtime/pprof.<fName> (possibly through intermediate frames).
	re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
	r := regexp.MustCompile(re)

	if deadline, ok := t.Deadline(); ok {
		if d := time.Until(deadline); d > 1*time.Second {
			// Arm a panic one second before the deadline so we fail with
			// full tracebacks instead of a silent timeout.
			timer := time.AfterFunc(d-1*time.Second, func() {
				debug.SetTraceback("all")
				panic(fmt.Sprintf("timed out waiting for %#q", re))
			})
			defer timer.Stop()
		}
	}

	buf := make([]byte, 64<<10)
	for {
		runtime.Gosched()
		n := runtime.Stack(buf, true)
		if n == len(buf) {
			// The buffer wasn't large enough for a full dump; grow it and
			// retry.
			buf = make([]byte, 2*len(buf))
			continue
		}
		if len(r.FindAll(buf[:n], -1)) >= count {
			return
		}
	}
}
1073
1074 func blockChanRecv(t *testing.T) {
1075 c := make(chan bool)
1076 go func() {
1077 awaitBlockedGoroutine(t, "chan receive", "blockChanRecv", 1)
1078 c <- true
1079 }()
1080 <-c
1081 }
1082
1083 func blockChanSend(t *testing.T) {
1084 c := make(chan bool)
1085 go func() {
1086 awaitBlockedGoroutine(t, "chan send", "blockChanSend", 1)
1087 <-c
1088 }()
1089 c <- true
1090 }
1091
1092 func blockChanClose(t *testing.T) {
1093 c := make(chan bool)
1094 go func() {
1095 awaitBlockedGoroutine(t, "chan receive", "blockChanClose", 1)
1096 close(c)
1097 }()
1098 <-c
1099 }
1100
// blockSelectRecvAsync blocks in a select on buffered-channel receives
// numTries times; the sender waits until this goroutine is visibly blocked
// in select before each send, so every iteration records a block event.
func blockSelectRecvAsync(t *testing.T) {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync", 1)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}
1118
1119 func blockSelectSendSync(t *testing.T) {
1120 c := make(chan bool)
1121 c2 := make(chan bool)
1122 go func() {
1123 awaitBlockedGoroutine(t, "select", "blockSelectSendSync", 1)
1124 <-c
1125 }()
1126 select {
1127 case c <- true:
1128 case c2 <- true:
1129 }
1130 }
1131
// blockMutex blocks on a sync.Mutex held by a helper goroutine, which
// releases it only after observing this goroutine in the mutex-wait state,
// guaranteeing a recorded contention event.
func blockMutex(t *testing.T) {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", 1)
		mu.Unlock()
	}()
	// This Lock is the blocking event being measured; it returns only
	// after the helper has seen us blocked and unlocked the mutex.
	mu.Lock()
}
1145
// blockMutexN makes n goroutines contend on one mutex. The holder waits
// until all n are blocked, sleeps d so each accumulates roughly d of
// delay, then releases the mutex and waits for them to finish.
func blockMutexN(t *testing.T, n int, d time.Duration) {
	var wg sync.WaitGroup
	var mu sync.Mutex
	mu.Lock()
	go func() {
		// The "blockMutex" prefix also matches the blockMutexN.funcN
		// frames of the waiters below.
		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", n)
		time.Sleep(d)
		mu.Unlock()
	}()

	// Lock the mutex from n goroutines so each records a contention event.
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			mu.Unlock()
		}()
	}
	wg.Wait()
}
1169
// blockCond blocks in sync.Cond.Wait until a helper goroutine, after
// observing this goroutine in the cond-wait state (so the signal cannot be
// lost), takes the lock and signals.
func blockCond(t *testing.T) {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond", 1)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}
1183
1184
// TestBlockProfileBias tests that the block profile is unbiased with
// respect to event duration relative to the rate: frequent short events
// and infrequent long events of equal total duration must report
// approximately equal delay.
func TestBlockProfileBias(t *testing.T) {
	rate := int(1000) // arbitrary rate; short events below it get sampled
	runtime.SetBlockProfileRate(rate)
	defer runtime.SetBlockProfileRate(0)

	// Both helpers synthesize the same total blocking time: 100000 events
	// of rate/10 cycles vs 10000 events of rate cycles.
	blockFrequentShort(rate)
	blockInfrequentLong(rate)

	var w bytes.Buffer
	Lookup("block").WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Fatalf("failed to parse profile: %v", err)
	}
	t.Logf("parsed proto: %s", p)

	il := float64(-1) // delay recorded for blockInfrequentLong
	fs := float64(-1) // delay recorded for blockFrequentShort
	for _, s := range p.Sample {
		for _, l := range s.Location {
			for _, line := range l.Line {
				if len(s.Value) < 2 {
					t.Fatal("block profile has less than 2 sample types")
				}

				if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
					il = float64(s.Value[1])
				} else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
					fs = float64(s.Value[1])
				}
			}
		}
	}
	if il == -1 || fs == -1 {
		t.Fatal("block profile is missing expected functions")
	}

	// Allow up to 20% relative difference between the two totals.
	const threshold = 0.2
	if bias := (il - fs) / il; math.Abs(bias) > threshold {
		t.Fatalf("bias: abs(%f) > %f", bias, threshold)
	} else {
		t.Logf("bias: abs(%f) < %f", bias, threshold)
	}
}
1231
1232
1233
// blockFrequentShort synthesizes 100000 block events with an average
// duration of rate/10 cycles each — frequent, short blocking.
func blockFrequentShort(rate int) {
	for i := 0; i < 100000; i++ {
		blockevent(int64(rate/10), 1)
	}
}
1239
1240
1241
// blockInfrequentLong synthesizes 10000 block events with an average
// duration of rate cycles each — infrequent, long blocking.
func blockInfrequentLong(rate int) {
	for i := 0; i < 10000; i++ {
		blockevent(int64(rate), 1)
	}
}
1247
1248
1249
1250
1251 func blockevent(cycles int64, skip int)
1252
// TestMutexProfile generates known mutex contention with blockMutexN and
// checks three renderings of the mutex profile: the debug=1 text output,
// the proto output (including total-delay bounds against wall clock), and
// the structured runtime.MutexProfile record API.
func TestMutexProfile(t *testing.T) {
	// Generate mutex profile; fraction 1 records every contention event.
	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	const (
		N = 100                    // contending goroutines
		D = 100 * time.Millisecond // hold time while they wait
	)
	start := time.Now()
	blockMutexN(t, N, D)
	blockMutexNTime := time.Since(start)

	t.Run("debug=1", func(t *testing.T) {
		var w strings.Builder
		Lookup("mutex").WriteTo(&w, 1)
		prof := w.String()
		t.Logf("received profile: %v", prof)

		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
			t.Errorf("Bad profile header:\n%v", prof)
		}
		prof = strings.Trim(prof, "\n")
		lines := strings.Split(prof, "\n")
		if len(lines) < 6 {
			t.Fatalf("expected >=6 lines, got %d %q\n%s", len(lines), prof, prof)
		}
		// A sample line looks like "35258904 1 @ 0x48288d 0x47cd28 ...".
		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[3], r2)
		}
		r3 := "^#.*runtime/pprof.blockMutex.*$"
		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[5], r3)
		}
		t.Log(prof)
	})
	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := profileStacks(p)
		for _, want := range [][]string{
			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
		} {
			if !containsStack(stks, want) {
				t.Errorf("No matching stack entry for %+v", want)
			}
		}

		// Locate the nanoseconds sample type and sum it over all samples.
		i := 0
		for ; i < len(p.SampleType); i++ {
			if p.SampleType[i].Unit == "nanoseconds" {
				break
			}
		}
		if i >= len(p.SampleType) {
			t.Fatalf("profile did not contain nanoseconds sample")
		}
		total := int64(0)
		for _, s := range p.Sample {
			total += s.Value[i]
		}
		// Want the total to be at least N*D, with some wiggle-room to
		// avoid flakes. The upper bound is proportional to the wall clock
		// spent in blockMutexN: scheduler and environment delays keep the
		// clock ticking while goroutines wait, so an absolute bound would
		// be wrong, but preemption is never delayed by more than a small
		// constant.
		d := time.Duration(total)
		lo := time.Duration(N * D * 9 / 10)
		hi := time.Duration(N) * blockMutexNTime * 11 / 10
		if d < lo || d > hi {
			for _, s := range p.Sample {
				t.Logf("sample: %s", time.Duration(s.Value[i]))
			}
			t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
		}
	})

	t.Run("records", func(t *testing.T) {
		// Grow the records slice until runtime.MutexProfile fits.
		var records []runtime.BlockProfileRecord
		for {
			n, ok := runtime.MutexProfile(records)
			if ok {
				records = records[:n]
				break
			}
			records = make([]runtime.BlockProfileRecord, n*2)
		}

		// Check that the structured record API reports the same stack the
		// proto profile did.
		stks := blockRecordStacks(records)
		want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"}
		if !containsStack(stks, want) {
			t.Errorf("No matching stack entry for %+v", want)
		}
	})
}
1371
// TestMutexProfileRateAdjust tests that previously recorded mutex profile
// samples survive setting the profile fraction back to 0: accumulated
// counts and delays must not change.
func TestMutexProfileRateAdjust(t *testing.T) {
	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	// readProfile sums the contention count and delay of samples that
	// pass through blockMutex's unlocking goroutine, excluding any that
	// also touch runtime-internal locks.
	readProfile := func() (contentions int64, delay int64) {
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		for _, s := range p.Sample {
			var match, runtimeInternal bool
			for _, l := range s.Location {
				for _, line := range l.Line {
					if line.Function.Name == "runtime/pprof.blockMutex.func1" {
						match = true
					}
					if line.Function.Name == "runtime.unlock" {
						runtimeInternal = true
					}
				}
			}
			if match && !runtimeInternal {
				contentions += s.Value[0]
				delay += s.Value[1]
			}
		}
		return
	}

	blockMutex(t)
	contentions, delay := readProfile()
	if contentions == 0 || delay == 0 {
		t.Fatal("did not see expected function in profile")
	}
	runtime.SetMutexProfileFraction(0)
	newContentions, newDelay := readProfile()
	if newContentions != contentions || newDelay != delay {
		t.Fatalf("sample value changed: got [%d, %d], want [%d, %d]", newContentions, newDelay, contentions, delay)
	}
}
1422
1423 func func1(c chan int) { <-c }
1424 func func2(c chan int) { <-c }
1425 func func3(c chan int) { <-c }
1426 func func4(c chan int) { <-c }
1427
// TestGoroutineCounts checks the "goroutine" profile in both its debug=1
// text form and its proto form: goroutine groups must appear sorted by
// count in the text output, and pprof labels (set via Do and
// SetGoroutineLabels) must be attached to the right groups in both forms.
func TestGoroutineCounts(t *testing.T) {
	// Run on a single P so the freshly launched goroutines settle between
	// Gosched calls.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	// Launch 100 unlabeled goroutines blocked on c, split 10/40/50 among
	// func1/func2/func3.
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Yield several times to let each new goroutine park on c.
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}
	ctx := context.Background()

	// Launch 89 more goroutines carrying the label "label"="value",
	// split 9/36/44 among func1/func2/func3.
	Do(ctx, Labels("label", "value"), func(context.Context) {
		for i := 0; i < 89; i++ {
			switch {
			case i%10 == 0:
				go func1(c)
			case i%2 == 0:
				go func2(c)
			default:
				go func3(c)
			}

			for j := 0; j < 5; j++ {
				runtime.Gosched()
			}
		}
	})

	// Label the test goroutine itself, so it shows up as the single
	// goroutine with "self-label".
	SetGoroutineLabels(WithLabels(context.Background(), Labels("self-label", "self-value")))
	defer SetGoroutineLabels(context.Background())

	// Block the finalizer goroutine inside labeled user code so it
	// appears in the profile with "fing-label".
	garbage := new(*int)
	fingReady := make(chan struct{})
	runtime.SetFinalizer(garbage, func(v **int) {
		Do(context.Background(), Labels("fing-label", "fing-value"), func(ctx context.Context) {
			close(fingReady)
			<-c
		})
	})
	garbage = nil
	for i := 0; i < 2; i++ {
		runtime.GC()
	}
	<-fingReady

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check the debug=1 text output.
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	labels := labelMap{"label": "value"}
	labelStr := "\n# labels: " + labels.String()
	selfLabel := labelMap{"self-label": "self-value"}
	selfLabelStr := "\n# labels: " + selfLabel.String()
	fingLabel := labelMap{"fing-label": "fing-value"}
	fingLabelStr := "\n# labels: " + fingLabel.String()
	// Expected group counts, largest first: 50/44/40/36/10/9 from the
	// launches above, then the single-goroutine groups.
	orderedPrefix := []string{
		"\n50 @ ",
		"\n44 @", labelStr,
		"\n40 @",
		"\n36 @", labelStr,
		"\n10 @",
		"\n9 @", labelStr,
		"\n1 @"}
	if !containsInOrder(prof, append(orderedPrefix, selfLabelStr)...) {
		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
	}
	if !containsInOrder(prof, append(orderedPrefix, fingLabelStr)...) {
		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
	}

	// Check the proto output carries the same counts and labels.
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	expectedLabels := map[int64]map[string]string{
		50: {},
		44: {"label": "value"},
		40: {},
		36: {"label": "value"},
		10: {},
		9:  {"label": "value"},
		1:  {"self-label": "self-value", "fing-label": "fing-value"},
	}
	if !containsCountsLabels(p, expectedLabels) {
		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
			expectedLabels, p)
	}

	close(c)

	// Give the released goroutines a moment to exit before other tests run.
	time.Sleep(10 * time.Millisecond)
}
1542
// containsInOrder reports whether every string in all occurs in s, in the
// given order and without overlap: each match is searched for only after
// the end of the previous one.
func containsInOrder(s string, all ...string) bool {
	rest := s
	for _, sub := range all {
		_, tail, found := strings.Cut(rest, sub)
		if !found {
			return false
		}
		rest = tail
	}
	return true
}
1552
1553 func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
1554 m := make(map[int64]int)
1555 type nkey struct {
1556 count int64
1557 key, val string
1558 }
1559 n := make(map[nkey]int)
1560 for c, kv := range countLabels {
1561 m[c]++
1562 for k, v := range kv {
1563 n[nkey{
1564 count: c,
1565 key: k,
1566 val: v,
1567 }]++
1568
1569 }
1570 }
1571 for _, s := range prof.Sample {
1572
1573 if len(s.Value) != 1 {
1574 return false
1575 }
1576 m[s.Value[0]]--
1577 for k, vs := range s.Label {
1578 for _, v := range vs {
1579 n[nkey{
1580 count: s.Value[0],
1581 key: k,
1582 val: v,
1583 }]--
1584 }
1585 }
1586 }
1587 for _, n := range m {
1588 if n > 0 {
1589 return false
1590 }
1591 }
1592 for _, ncnt := range n {
1593 if ncnt != 0 {
1594 return false
1595 }
1596 }
1597 return true
1598 }
1599
// TestGoroutineProfileConcurrency exercises the goroutine profiler while
// goroutines are concurrently launched, scheduled, and relabeled, and
// checks that the finalizer goroutine is reported only while it runs
// user code.
func TestGoroutineProfileConcurrency(t *testing.T) {
	testenv.MustHaveParallelism(t)

	goroutineProf := Lookup("goroutine")

	// profilerCalls counts how many goroutines in a debug=1 profile were
	// captured while they were themselves taking a goroutine profile.
	profilerCalls := func(s string) int {
		return strings.Count(s, "\truntime/pprof.runtime_goroutineProfileWithLabels+")
	}

	// includesFinalizer reports whether the finalizer goroutine
	// (runtime.runfinq) appears in the profile text.
	includesFinalizer := func(s string) bool {
		return strings.Contains(s, "runtime.runfinq")
	}

	// Overlapping profile requests from two goroutines must not crash or
	// corrupt output; stop once a profile shows both profilers in flight.
	t.Run("overlapping profile requests", func(t *testing.T) {
		ctx := context.Background()
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()

		var wg sync.WaitGroup
		for i := 0; i < 2; i++ {
			wg.Add(1)
			Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) {
				go func() {
					defer wg.Done()
					for ctx.Err() == nil {
						var w strings.Builder
						goroutineProf.WriteTo(&w, 1)
						prof := w.String()
						count := profilerCalls(prof)
						if count >= 2 {
							t.Logf("prof %d\n%s", count, prof)
							cancel()
						}
					}
				}()
			})
		}
		wg.Wait()
	})

	// While idle, the finalizer goroutine should be treated as a system
	// goroutine and stay out of the profile.
	t.Run("finalizer not present", func(t *testing.T) {
		var w strings.Builder
		goroutineProf.WriteTo(&w, 1)
		prof := w.String()
		if includesFinalizer(prof) {
			t.Errorf("profile includes finalizer (but finalizer should be marked as system):\n%s", prof)
		}
	})

	// While running user-provided finalizer code, the finalizer goroutine
	// should appear in the profile.
	t.Run("finalizer present", func(t *testing.T) {
		// obj is a pointer-typed allocation; NOTE(review): presumably
		// chosen so it is eligible for a finalizer without tiny-allocator
		// complications — confirm.
		type T *byte
		obj := new(T)
		ch1, ch2 := make(chan int), make(chan int)
		defer close(ch2)
		runtime.SetFinalizer(obj, func(_ interface{}) {
			close(ch1)
			<-ch2
		})
		obj = nil
		// Run GC until the finalizer has started (signals via ch1).
		for i := 10; i >= 0; i-- {
			select {
			case <-ch1:
			default:
				if i == 0 {
					t.Fatalf("finalizer did not run")
				}
				runtime.GC()
			}
		}
		var w strings.Builder
		goroutineProf.WriteTo(&w, 1)
		prof := w.String()
		if !includesFinalizer(prof) {
			t.Errorf("profile does not include finalizer (and it should be marked as user):\n%s", prof)
		}
	})

	// testLaunches checks that a profile is a consistent snapshot even as
	// goroutines launch and relabel themselves: each loop-i label value
	// must be captured exactly once per profile.
	testLaunches := func(t *testing.T) {
		var done sync.WaitGroup
		defer done.Wait()

		ctx := context.Background()
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		ch := make(chan int)
		defer close(ch)

		var ready sync.WaitGroup

		// Launcher goroutine: relabels itself with the iteration number,
		// spawns a goroutine, then spends an increasing amount of time on
		// that label so profiles land on a spread of i values.
		ready.Add(1)
		done.Add(1)
		go func() {
			defer done.Done()
			for i := 0; ctx.Err() == nil; i++ {
				// Tag this goroutine with the current iteration.
				SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-loop-i", fmt.Sprint(i))))
				done.Add(1)
				go func() {
					<-ch
					done.Done()
				}()
				for j := 0; j < i; j++ {
					// Burn scheduler time proportional to i so profiles
					// observe a variety of label values.
					runtime.Gosched()
				}
				if i == 0 {
					ready.Done()
				}
			}
		}()

		// Churn: an unbounded chain of goroutines that constantly relabel
		// and hand off, stressing the profiler's snapshotting.
		ready.Add(1)
		var churn func(i int)
		churn = func(i int) {
			SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-churn-i", fmt.Sprint(i))))
			if i == 0 {
				ready.Done()
			} else if i%16 == 0 {
				// Yield occasionally so the churn chain does not
				// monopolize a P.
				runtime.Gosched()
			}
			if ctx.Err() == nil {
				go churn(i + 1)
			}
		}
		go func() {
			churn(0)
		}()

		ready.Wait()

		var w [3]bytes.Buffer
		for i := range w {
			goroutineProf.WriteTo(&w[i], 0)
		}
		for i := range w {
			p, err := profile.Parse(bytes.NewReader(w[i].Bytes()))
			if err != nil {
				t.Errorf("error parsing protobuf profile: %v", err)
			}

			// Each loop-i label value should appear exactly once; the most
			// recent value may appear twice (presumably because the
			// just-launched goroutine inherits the launcher's labels).
			counts := make(map[string]int)
			for _, s := range p.Sample {
				label := s.Label[t.Name()+"-loop-i"]
				if len(label) > 0 {
					counts[label[0]]++
				}
			}
			for j, max := 0, len(counts)-1; j <= max; j++ {
				n := counts[fmt.Sprint(j)]
				if n == 1 || (n == 2 && j == max) {
					continue
				}
				t.Errorf("profile #%d's goroutines with label loop-i:%d; %d != 1 (or 2 for the last entry, %d)",
					i+1, j, n, max)
				t.Logf("counts %v", counts)
				break
			}
		}
	}

	runs := 100
	if testing.Short() {
		runs = 5
	}
	// Repeat to give rare interleavings a chance to appear.
	for i := 0; i < runs; i++ {
		t.Run("goroutine launches", testLaunches)
	}
}
1795
1796
1797 func TestGoroutineProfileCoro(t *testing.T) {
1798 testenv.MustHaveParallelism(t)
1799
1800 goroutineProf := Lookup("goroutine")
1801
1802
1803 iterFunc := func() {
1804 p, stop := iter.Pull2(
1805 func(yield func(int, int) bool) {
1806 for i := 0; i < 10000; i++ {
1807 if !yield(i, i) {
1808 return
1809 }
1810 }
1811 },
1812 )
1813 defer stop()
1814 for {
1815 _, _, ok := p()
1816 if !ok {
1817 break
1818 }
1819 }
1820 }
1821 var wg sync.WaitGroup
1822 done := make(chan struct{})
1823 wg.Add(1)
1824 go func() {
1825 defer wg.Done()
1826 for {
1827 iterFunc()
1828 select {
1829 case <-done:
1830 default:
1831 }
1832 }
1833 }()
1834
1835
1836
1837 goroutineProf.WriteTo(io.Discard, 1)
1838 }
1839
// BenchmarkGoroutine measures the cost of taking goroutine profiles under
// varying populations of idle goroutines, with and without concurrent
// goroutine churn, via both Profile.WriteTo and runtime.GoroutineProfile.
func BenchmarkGoroutine(b *testing.B) {
	// withIdle wraps fn so it runs with n goroutines parked on a channel
	// for the duration of the benchmark.
	withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
		return func(b *testing.B) {
			c := make(chan int)
			var ready, done sync.WaitGroup
			defer func() {
				close(c)
				done.Wait()
			}()

			for i := 0; i < n; i++ {
				ready.Add(1)
				done.Add(1)
				go func() {
					ready.Done()
					<-c
					done.Done()
				}()
			}

			// Let all the idle goroutines launch, then yield a few times
			// so they actually park before the measurement starts.
			ready.Wait()
			for i := 0; i < 5; i++ {
				runtime.Gosched()
			}

			fn(b)
		}
	}

	// withChurn wraps fn so it runs while a chain of short-lived, labeled
	// goroutines is continuously created; reports launches per op.
	withChurn := func(fn func(b *testing.B)) func(b *testing.B) {
		return func(b *testing.B) {
			ctx := context.Background()
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			var ready sync.WaitGroup
			ready.Add(1)
			var count int64
			var churn func(i int)
			churn = func(i int) {
				SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
				atomic.AddInt64(&count, 1)
				if i == 0 {
					ready.Done()
				}
				if ctx.Err() == nil {
					go churn(i + 1)
				}
			}
			go func() {
				churn(0)
			}()
			ready.Wait()

			fn(b)
			b.ReportMetric(float64(atomic.LoadInt64(&count))/float64(b.N), "concurrent_launches/op")
		}
	}

	// benchWriteTo measures the full proto-encoded profile write path.
	benchWriteTo := func(b *testing.B) {
		goroutineProf := Lookup("goroutine")
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			goroutineProf.WriteTo(io.Discard, 0)
		}
		b.StopTimer()
	}

	// benchGoroutineProfile measures the raw runtime API with a
	// preallocated record slice (10000 covers all benchmark populations).
	benchGoroutineProfile := func(b *testing.B) {
		p := make([]runtime.StackRecord, 10000)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			runtime.GoroutineProfile(p)
		}
		b.StopTimer()
	}

	// Run each variant across several goroutine population sizes.
	for _, n := range []int{50, 500, 5000} {
		b.Run(fmt.Sprintf("Profile.WriteTo idle %d", n), withIdle(n, benchWriteTo))
		b.Run(fmt.Sprintf("Profile.WriteTo churn %d", n), withIdle(n, withChurn(benchWriteTo)))
		b.Run(fmt.Sprintf("runtime.GoroutineProfile churn %d", n), withIdle(n, withChurn(benchGoroutineProfile)))
	}
}
1926
1927 var emptyCallStackTestRun int64
1928
1929
1930 func TestEmptyCallStack(t *testing.T) {
1931 name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
1932 emptyCallStackTestRun++
1933
1934 t.Parallel()
1935 var buf strings.Builder
1936 p := NewProfile(name)
1937
1938 p.Add("foo", 47674)
1939 p.WriteTo(&buf, 1)
1940 p.Remove("foo")
1941 got := buf.String()
1942 prefix := name + " profile: total 1\n"
1943 if !strings.HasPrefix(got, prefix) {
1944 t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
1945 }
1946 lostevent := "lostProfileEvent"
1947 if !strings.Contains(got, lostevent) {
1948 t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
1949 }
1950 }
1951
1952
1953
1954 func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
1955 base, kv, ok := strings.Cut(spec, ";")
1956 if !ok {
1957 panic("no semicolon in key/value spec")
1958 }
1959 k, v, ok := strings.Cut(kv, "=")
1960 if !ok {
1961 panic("missing = in key/value spec")
1962 }
1963 if !contains(labels[k], v) {
1964 return false
1965 }
1966 return stackContains(base, count, stk, labels)
1967 }
1968
1969 func TestCPUProfileLabel(t *testing.T) {
1970 matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions())
1971 testCPUProfile(t, matches, func(dur time.Duration) {
1972 Do(context.Background(), Labels("key", "value"), func(context.Context) {
1973 cpuHogger(cpuHog1, &salt1, dur)
1974 })
1975 })
1976 }
1977
// TestLabelRace stresses concurrent label creation while CPU profiling is
// active. Its main value is under the race detector; the pass criterion
// here is only that labeled cpuHogger samples appear.
func TestLabelRace(t *testing.T) {
	testenv.MustHaveParallelism(t)

	// Unlike TestCPUProfileLabel, no avoid-list is passed: any labeled
	// cpuHogger sample is acceptable.
	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil)
	testCPUProfile(t, matches, func(dur time.Duration) {
		start := time.Now()
		var wg sync.WaitGroup
		// Repeatedly spawn batches of 10 short-lived labeled goroutines,
		// each hogging CPU for ~1ms, until dur has elapsed.
		for time.Since(start) < dur {
			var salts [10]int
			for i := 0; i < 10; i++ {
				wg.Add(1)
				go func(j int) {
					Do(context.Background(), Labels("key", "value"), func(context.Context) {
						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
					})
					wg.Done()
				}(i)
			}
			wg.Wait()
		}
	})
}
2002
// TestGoroutineProfileLabelRace stresses the goroutine profiler against
// concurrent label mutation. Its main value is under the race detector;
// the subtests only need to terminate without crashing.
func TestGoroutineProfileLabelRace(t *testing.T) {
	testenv.MustHaveParallelism(t)

	// One goroutine repeatedly takes debug=1 goroutine profiles while the
	// main goroutine resets its own labels in a tight loop; stop once a
	// profile has observed one of the loop-i labels.
	t.Run("reset", func(t *testing.T) {
		ctx := context.Background()
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		go func() {
			goroutineProf := Lookup("goroutine")
			for ctx.Err() == nil {
				var w strings.Builder
				goroutineProf.WriteTo(&w, 1)
				prof := w.String()
				if strings.Contains(prof, "loop-i") {
					cancel()
				}
			}
		}()

		for i := 0; ctx.Err() == nil; i++ {
			Do(ctx, Labels("loop-i", fmt.Sprint(i)), func(ctx context.Context) {
			})
		}
	})

	// Profile while a chain of goroutines constantly relabel themselves
	// and hand off to freshly launched goroutines.
	t.Run("churn", func(t *testing.T) {
		ctx := context.Background()
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		var ready sync.WaitGroup
		ready.Add(1)
		var churn func(i int)
		churn = func(i int) {
			SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
			if i == 0 {
				ready.Done()
			}
			if ctx.Err() == nil {
				go churn(i + 1)
			}
		}
		go func() {
			churn(0)
		}()
		ready.Wait()

		goroutineProf := Lookup("goroutine")
		for i := 0; i < 10; i++ {
			goroutineProf.WriteTo(io.Discard, 1)
		}
	})
}
2060
2061
2062
// TestLabelSystemstack checks label attribution for samples that land in
// runtime code: user functions under Do must always be labeled, runtime
// system goroutines must never be, and ambiguous runtime frames are
// allowed either way.
func TestLabelSystemstack(t *testing.T) {
	// Read the current GOGC setting (write-and-restore is the only way to
	// read it); parallelLabelHog resets it in a loop to force runtime
	// (systemstack) work while the label is set.
	gogc := debug.SetGCPercent(100)
	debug.SetGCPercent(gogc)

	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
			parallelLabelHog(ctx, dur, gogc)
		})
	})

	// Classify every sample into must-/must-not-/may-be-labeled buckets
	// based on the functions on its stack, then verify the label state
	// against that classification.
	for _, s := range p.Sample {
		isLabeled := s.Label != nil && contains(s.Label["key"], "value")
		var (
			mayBeLabeled     bool
			mustBeLabeled    string
			mustNotBeLabeled string
		)
		for _, loc := range s.Location {
			for _, l := range loc.Line {
				switch l.Function.Name {
				case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
					// These run entirely inside the Do callback.
					mustBeLabeled = l.Function.Name
				case "runtime/pprof.Do":
					// Do sets up and tears down the labels, so a sample
					// inside Do may land on either side of the label
					// installation.
					mayBeLabeled = true
				case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runfinq", "runtime.sysmon":
					// Runtime system goroutines never run user code and
					// must never carry user labels.
					mustNotBeLabeled = l.Function.Name
				case "gogo", "gosave_systemstack_switch", "racecall":
					// Context-switch primitives: a sample here may be
					// attributed to either side of the switch, so the
					// label state is indeterminate.
					mayBeLabeled = true
				}

				if strings.HasPrefix(l.Function.Name, "runtime.") {
					// Generic runtime frames may or may not be running on
					// behalf of a labeled goroutine; accept either.
					mayBeLabeled = true
				}
			}
		}
		errorStack := func(f string, args ...any) {
			var buf strings.Builder
			fprintStack(&buf, s.Location)
			t.Errorf("%s: %s", fmt.Sprintf(f, args...), buf.String())
		}
		if mustBeLabeled != "" && mustNotBeLabeled != "" {
			errorStack("sample contains both %s, which must be labeled, and %s, which must not be labeled", mustBeLabeled, mustNotBeLabeled)
			continue
		}
		if mustBeLabeled != "" || mustNotBeLabeled != "" {
			// A definite classification overrides runtime-frame ambiguity.
			mayBeLabeled = false
		}
		if mayBeLabeled {
			// Nothing to verify for ambiguous samples.
			continue
		}
		if mustBeLabeled != "" && !isLabeled {
			errorStack("sample must be labeled because of %s, but is not", mustBeLabeled)
		}
		if mustNotBeLabeled != "" && isLabeled {
			errorStack("sample must not be labeled because of %s, but is", mustNotBeLabeled)
		}
	}
}
2149
2150
2151
// labelHog burns CPU in runtime code (debug.SetGCPercent) until stop is
// closed; TestLabelSystemstack uses it to generate labeled samples that
// pass through the system stack.
func labelHog(stop chan struct{}, gogc int) {
	// Flush any pending GC state so subsequent samples belong to this run.
	runtime.GC()

	for {
		select {
		case <-stop:
			return
		default:
			debug.SetGCPercent(gogc)
		}
	}
}
2166
2167
2168 func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
2169 var wg sync.WaitGroup
2170 stop := make(chan struct{})
2171 for i := 0; i < runtime.GOMAXPROCS(0); i++ {
2172 wg.Add(1)
2173 go func() {
2174 defer wg.Done()
2175 labelHog(stop, gogc)
2176 }()
2177 }
2178
2179 time.Sleep(dur)
2180 close(stop)
2181 wg.Wait()
2182 }
2183
2184
2185
2186 func TestAtomicLoadStore64(t *testing.T) {
2187 f, err := os.CreateTemp("", "profatomic")
2188 if err != nil {
2189 t.Fatalf("TempFile: %v", err)
2190 }
2191 defer os.Remove(f.Name())
2192 defer f.Close()
2193
2194 if err := StartCPUProfile(f); err != nil {
2195 t.Fatal(err)
2196 }
2197 defer StopCPUProfile()
2198
2199 var flag uint64
2200 done := make(chan bool, 1)
2201
2202 go func() {
2203 for atomic.LoadUint64(&flag) == 0 {
2204 runtime.Gosched()
2205 }
2206 done <- true
2207 }()
2208 time.Sleep(50 * time.Millisecond)
2209 atomic.StoreUint64(&flag, 1)
2210 <-done
2211 }
2212
2213 func TestTracebackAll(t *testing.T) {
2214
2215
2216 f, err := os.CreateTemp("", "proftraceback")
2217 if err != nil {
2218 t.Fatalf("TempFile: %v", err)
2219 }
2220 defer os.Remove(f.Name())
2221 defer f.Close()
2222
2223 if err := StartCPUProfile(f); err != nil {
2224 t.Fatal(err)
2225 }
2226 defer StopCPUProfile()
2227
2228 ch := make(chan int)
2229 defer close(ch)
2230
2231 count := 10
2232 for i := 0; i < count; i++ {
2233 go func() {
2234 <-ch
2235 }()
2236 }
2237
2238 N := 10000
2239 if testing.Short() {
2240 N = 500
2241 }
2242 buf := make([]byte, 10*1024)
2243 for i := 0; i < N; i++ {
2244 runtime.Stack(buf, true)
2245 }
2246 }
2247
2248
2249
2250
2251
2252
2253
// TestTryAdd feeds hand-built CPU profile records through
// translateCPUProfile to cover cases that are hard to produce with a real
// profiling run: lost samples, truncated stacks, and repeated inlined
// frames. Each input is a sequence of length-prefixed records; the first
// record is the header (sample rate), the rest are [len, tag, count,
// pcs...] samples.
func TestTryAdd(t *testing.T) {
	if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
		t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
	}

	// Capture a real PC pair from an inlined call: [0] is in the inlined
	// callee, [1] in its caller.
	pcs := make([]uintptr, 2)
	inlinedCallerDump(pcs)
	inlinedCallerStack := make([]uint64, 2)
	for i := range pcs {
		inlinedCallerStack[i] = uint64(pcs[i])
	}
	wrapperPCs := make([]uintptr, 1)
	inlinedWrapperCallerDump(wrapperPCs)

	if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
		t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
	}

	// Capture a 6-PC trace through the mutually recursive inline chain
	// recursionChainTop -> recursionChainMiddle -> recursionChainBottom.
	pcs = make([]uintptr, 6)
	recursionChainTop(1, pcs)
	recursionStack := make([]uint64, len(pcs))
	for i := range pcs {
		recursionStack[i] = uint64(pcs[i])
	}

	// Expected sample period in nanoseconds, matching the 500 Hz header
	// below (1e9/500 = 2e6).
	period := int64(2000 * 1000)

	testCases := []struct {
		name        string
		input       []uint64          // raw records fed to translateCPUProfile
		count       int               // number of records in input
		wantLocs    [][]string        // expected function names per location
		wantSamples []*profile.Sample // expected samples (locations by ID)
	}{{
		// Sanity check: a normal, complete stack trace.
		name: "full_stack_trace",
		input: []uint64{
			3, 0, 500,
			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		count: 2,
		wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug35538",
		input: []uint64{
			3, 0, 500,
			// A stack where the inlined callee/caller PC pair appears
			// twice must yield the same location twice, not a malformed
			// merge of the repeated frames.
			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		count:    3,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug38096",
		input: []uint64{
			3, 0, 500,
			// A count of 0 with a "lost" value of 4242 must be surfaced
			// as a lostProfileEvent sample, not dropped.
			4, 0, 0, 4242,
		},
		count:    2,
		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// A directly recursive inlined frame: the repeated PC must map to
		// a location containing only the callee, with the caller kept in
		// its own separate location, so the recursion stays visible.
		name: "directly_recursive_func_is_not_inlined",
		input: []uint64{
			3, 0, 500,
			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
			4, 0, 40, inlinedCallerStack[0],
		},
		count: 3,
		// inlinedCallerDump shows up as its own location because the
		// recursive use of inlinedCalleeDump's PC cannot share it.
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
		},
	}, {
		name: "recursion_chain_inline",
		input: []uint64{
			3, 0, 500,
			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
		},
		count: 2,
		wantLocs: [][]string{
			{"runtime/pprof.recursionChainBottom"},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.recursionChainBottom",
			},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.TestTryAdd",
			},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
		},
	}, {
		name: "truncated_stack_trace_later",
		input: []uint64{
			3, 0, 500,
			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
			4, 0, 60, inlinedCallerStack[0],
		},
		count:    3,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "truncated_stack_trace_first",
		input: []uint64{
			3, 0, 500,
			4, 0, 70, inlinedCallerStack[0],
			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		count:    3,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// A truncated stack with no follow-up full stack still resolves.
		name: "truncated_stack_trace_only",
		input: []uint64{
			3, 0, 500,
			4, 0, 70, inlinedCallerStack[0],
		},
		count:    2,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// The second sample's reversed PC order cannot reuse the merged
		// location from the first; the caller PC gets its own location.
		name: "truncated_stack_trace_twice",
		input: []uint64{
			3, 0, 500,
			4, 0, 70, inlinedCallerStack[0],
			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
		},
		count: 3,
		wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
			{"runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
		},
	}, {
		name: "expand_wrapper_function",
		input: []uint64{
			3, 0, 500,
			4, 0, 50, uint64(wrapperPCs[0]),
		},
		count:    2,
		wantLocs: [][]string{{"runtime/pprof.inlineWrapper.dump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			p, err := translateCPUProfile(tc.input, tc.count)
			if err != nil {
				t.Fatalf("translating profile: %v", err)
			}
			t.Logf("Profile: %v\n", p)

			// Compare the produced locations (function names per
			// location, in order) against expectations.
			var gotLoc [][]string
			for _, loc := range p.Location {
				var names []string
				for _, line := range loc.Line {
					names = append(names, line.Function.Name)
				}
				gotLoc = append(gotLoc, names)
			}
			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
			}

			// Compare samples by value and location ID only.
			var gotSamples []*profile.Sample
			for _, sample := range p.Sample {
				var locs []*profile.Location
				for _, loc := range sample.Location {
					locs = append(locs, &profile.Location{ID: loc.ID})
				}
				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
			}
			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
			}
		})
	}
}
2489
// TestTimeVDSO profiles a tight time.Now loop (which typically executes
// in the VDSO on Linux) and checks that the resulting stacks are sane:
// time.now must never appear to call itself, which would indicate broken
// unwinding through VDSO frames.
func TestTimeVDSO(t *testing.T) {
	// Test that time functions have the right stack trace. In particular,
	// it shouldn't be recursive.

	if runtime.GOOS == "android" {
		// Flaky on Android, issue 48655.
		testenv.SkipFlaky(t, 48655)
	}

	matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		// Busy-loop on time.Now (not time.Since) so samples land in
		// time.now itself.
		t0 := time.Now()
		for {
			t := time.Now()
			if t.Sub(t0) >= dur {
				return
			}
		}
	})

	// Check that there is no recursion of time.now in any sample.
	for _, sample := range p.Sample {
		var seenNow bool
		for _, loc := range sample.Location {
			for _, line := range loc.Line {
				if line.Function.Name == "time.now" {
					if seenNow {
						t.Fatalf("unexpected recursive time.now")
					}
					seenNow = true
				}
			}
		}
	}
}
2525
// TestProfilerStackDepth checks that the heap, block, mutex, and
// goroutine profilers each record stacks exactly 128 frames deep, rooted
// at produceProfileEvents, for the deep call chains created by the
// *Deep helpers.
func TestProfilerStackDepth(t *testing.T) {
	// Disable sampling (see disableSampling) so every event is recorded.
	t.Cleanup(disableSampling())

	const depth = 128
	// produceProfileEvents ends parked in goroutineDeep; wait until that
	// goroutine is blocked before reading the profiles.
	go produceProfileEvents(t, depth)
	awaitBlockedGoroutine(t, "chan receive", "goroutineDeep", 1)

	// For each profiler, the expected innermost frames of the deep stack.
	tests := []struct {
		profiler string
		prefix   []string
	}{
		{"heap", []string{"runtime/pprof.allocDeep"}},
		{"block", []string{"runtime.chanrecv1", "runtime/pprof.blockChanDeep"}},
		{"mutex", []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexDeep"}},
		{"goroutine", []string{"runtime.gopark", "runtime.chanrecv", "runtime.chanrecv1", "runtime/pprof.goroutineDeep"}},
	}

	for _, test := range tests {
		t.Run(test.profiler, func(t *testing.T) {
			var buf bytes.Buffer
			if err := Lookup(test.profiler).WriteTo(&buf, 0); err != nil {
				t.Fatalf("failed to write heap profile: %v", err)
			}
			p, err := profile.Parse(&buf)
			if err != nil {
				t.Fatalf("failed to parse heap profile: %v", err)
			}
			t.Logf("Profile = %v", p)

			// Find the stack produced by the deep helper via its
			// innermost frames.
			stks := profileStacks(p)
			var stk []string
			for _, s := range stks {
				if hasPrefix(s, test.prefix) {
					stk = s
					break
				}
			}
			if len(stk) != depth {
				t.Fatalf("want stack depth = %d, got %d", depth, len(stk))
			}

			// The outermost retained frame must be produceProfileEvents:
			// truncation, if any, must drop outer frames, not inner ones.
			if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn {
				t.Fatalf("want stack stack root %s, got %v", wantFn, rootFn)
			}
		})
	}
}
2573
// hasPrefix reports whether the slice prefix is an element-wise prefix
// of stk.
func hasPrefix(stk []string, prefix []string) bool {
	for i, want := range prefix {
		if i >= len(stk) || stk[i] != want {
			return false
		}
	}
	return true
}
2585
2586
// Compile-time assertions that the profile record types remain
// comparable (usable as map keys).
var _ = map[runtime.MemProfileRecord]struct{}{}
var _ = map[runtime.StackRecord]struct{}{}
2589
2590
// allocDeep recurses to depth n and then makes a 1 MiB heap allocation,
// producing a heap-profile event whose stack contains n allocDeep frames.
// The recursive shape is load-bearing for TestProfilerStackDepth; do not
// restructure.
func allocDeep(n int) {
	if n > 1 {
		allocDeep(n - 1)
		return
	}
	memSink = make([]byte, 1<<20)
}
2598
2599
2600
// blockChanDeep recurses to depth n and then blocks on a channel receive
// until a helper goroutine observes the block and unblocks it, producing
// a block-profile event with n blockChanDeep frames. The recursive shape
// is load-bearing for TestProfilerStackDepth; do not restructure.
func blockChanDeep(t *testing.T, n int) {
	if n > 1 {
		blockChanDeep(t, n-1)
		return
	}
	ch := make(chan struct{})
	go func() {
		// Wait until this function is actually parked before sending.
		awaitBlockedGoroutine(t, "chan receive", "blockChanDeep", 1)
		ch <- struct{}{}
	}()
	<-ch
}
2613
2614
2615
// blockMutexDeep recurses to depth n and then makes a helper goroutine
// block on a mutex this function holds, producing a mutex-profile event
// (recorded at Unlock) with n blockMutexDeep frames. The recursive shape
// is load-bearing for TestProfilerStackDepth; do not restructure.
func blockMutexDeep(t *testing.T, n int) {
	if n > 1 {
		blockMutexDeep(t, n-1)
		return
	}
	var mu sync.Mutex
	go func() {
		// First Lock succeeds; the second blocks until we Unlock below.
		mu.Lock()
		mu.Lock()
	}()
	// Only Unlock once the helper is actually blocked, so contention is
	// guaranteed to be recorded.
	awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutexDeep", 1)
	mu.Unlock()
}
2629
2630
2631
// goroutineDeep recurses to depth n and then parks on a channel until
// test cleanup, so the goroutine profile records a stack with n
// goroutineDeep frames. The recursive shape is load-bearing for
// TestProfilerStackDepth; do not restructure.
func goroutineDeep(t *testing.T, n int) {
	if n > 1 {
		goroutineDeep(t, n-1)
		return
	}
	wait := make(chan struct{}, 1)
	// Release the goroutine when the test finishes.
	t.Cleanup(func() {
		wait <- struct{}{}
	})
	<-wait
}
2643
2644
2645
2646
2647
// produceProfileEvents generates one heap, block, mutex, and goroutine
// profile event, each with a call stack of the given total depth rooted
// at this function. The per-call offsets (-1, -2, -2, -4) presumably
// compensate for the extra frames each helper and the runtime add —
// confirm against TestProfilerStackDepth's exact-depth check.
func produceProfileEvents(t *testing.T, depth int) {
	allocDeep(depth - 1)
	blockChanDeep(t, depth-2)
	blockMutexDeep(t, depth-2)
	// Drop the allocation and force GC so it is recorded as freed in the
	// heap profile.
	memSink = nil
	runtime.GC()
	// Must be last: parks until test cleanup.
	goroutineDeep(t, depth-4)
}
2656
2657 func getProfileStacks(collect func([]runtime.BlockProfileRecord) (int, bool), fileLine bool) []string {
2658 var n int
2659 var ok bool
2660 var p []runtime.BlockProfileRecord
2661 for {
2662 p = make([]runtime.BlockProfileRecord, n)
2663 n, ok = collect(p)
2664 if ok {
2665 p = p[:n]
2666 break
2667 }
2668 }
2669 var stacks []string
2670 for _, r := range p {
2671 var stack strings.Builder
2672 for i, pc := range r.Stack() {
2673 if i > 0 {
2674 stack.WriteByte('\n')
2675 }
2676
2677
2678
2679
2680
2681 f := runtime.FuncForPC(pc - 1)
2682 stack.WriteString(f.Name())
2683 if fileLine {
2684 stack.WriteByte(' ')
2685 file, line := f.FileLine(pc - 1)
2686 stack.WriteString(file)
2687 stack.WriteByte(':')
2688 stack.WriteString(strconv.Itoa(line))
2689 }
2690 }
2691 stacks = append(stacks, stack.String())
2692 }
2693 return stacks
2694 }
2695
// TestMutexBlockFullAggregation is a regression test checking that the
// mutex and block profiles fully aggregate contention events: the same
// call stack (including file:line) must never appear in more than one
// record.
func TestMutexBlockFullAggregation(t *testing.T) {
	var m sync.Mutex

	// Read the current fraction (write-and-restore is the only way) and
	// then sample every event for both profiles.
	prev := runtime.SetMutexProfileFraction(-1)
	defer runtime.SetMutexProfileFraction(prev)

	const fraction = 1
	const iters = 100
	const workers = 2

	runtime.SetMutexProfileFraction(fraction)
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)

	// Two workers repeatedly contend on m, holding it for ~1ms so the
	// other worker reliably blocks, generating many identical-stack
	// contention events.
	wg := sync.WaitGroup{}
	wg.Add(workers)
	for j := 0; j < workers; j++ {
		go func() {
			for i := 0; i < iters; i++ {
				m.Lock()
				time.Sleep(time.Millisecond)
				m.Unlock()
			}
			wg.Done()
		}()
	}
	wg.Wait()

	// All of those identical events must have been merged into a single
	// record per distinct stack.
	assertNoDuplicates := func(name string, collect func([]runtime.BlockProfileRecord) (int, bool)) {
		stacks := getProfileStacks(collect, true)
		seen := make(map[string]struct{})
		for _, s := range stacks {
			if _, ok := seen[s]; ok {
				t.Errorf("saw duplicate entry in %s profile with stack:\n%s", name, s)
			}
			seen[s] = struct{}{}
		}
		if len(seen) == 0 {
			t.Errorf("did not see any samples in %s profile for this test", name)
		}
	}
	t.Run("mutex", func(t *testing.T) {
		assertNoDuplicates("mutex", runtime.MutexProfile)
	})
	t.Run("block", func(t *testing.T) {
		assertNoDuplicates("block", runtime.BlockProfile)
	})
}
2750
// inlineA -> inlineB -> inlineC is a small call chain that the test below
// (TestBlockMutexProfileInlineExpansion) expects to be inlined and then
// expanded back into distinct frames in the block profile. inlineC blocks
// on mu.Lock: the caller starts this chain with mu already held.
// Keep these bodies trivial so they remain eligible for inlining.
func inlineA(mu *sync.Mutex, wg *sync.WaitGroup) { inlineB(mu, wg) }
func inlineB(mu *sync.Mutex, wg *sync.WaitGroup) { inlineC(mu, wg) }
func inlineC(mu *sync.Mutex, wg *sync.WaitGroup) {
	defer wg.Done()
	mu.Lock()
	mu.Unlock()
}
2758
// inlineD -> inlineE -> inlineF mirrors the chain above for the mutex
// profile: inlineF unlocks mu while another goroutine is blocked on it,
// producing a contention event attributed to this (inlined) chain.
// Keep these bodies trivial so they remain eligible for inlining.
func inlineD(mu *sync.Mutex, wg *sync.WaitGroup) { inlineE(mu, wg) }
func inlineE(mu *sync.Mutex, wg *sync.WaitGroup) { inlineF(mu, wg) }
func inlineF(mu *sync.Mutex, wg *sync.WaitGroup) {
	defer wg.Done()
	mu.Unlock()
}
2765
2766 func TestBlockMutexProfileInlineExpansion(t *testing.T) {
2767 runtime.SetBlockProfileRate(1)
2768 defer runtime.SetBlockProfileRate(0)
2769 prev := runtime.SetMutexProfileFraction(1)
2770 defer runtime.SetMutexProfileFraction(prev)
2771
2772 var mu sync.Mutex
2773 var wg sync.WaitGroup
2774 wg.Add(2)
2775 mu.Lock()
2776 go inlineA(&mu, &wg)
2777 awaitBlockedGoroutine(t, "sync.Mutex.Lock", "inlineC", 1)
2778
2779 go inlineD(&mu, &wg)
2780 wg.Wait()
2781
2782 tcs := []struct {
2783 Name string
2784 Collect func([]runtime.BlockProfileRecord) (int, bool)
2785 SubStack string
2786 }{
2787 {
2788 Name: "mutex",
2789 Collect: runtime.MutexProfile,
2790 SubStack: `sync.(*Mutex).Unlock
2791 runtime/pprof.inlineF
2792 runtime/pprof.inlineE
2793 runtime/pprof.inlineD`,
2794 },
2795 {
2796 Name: "block",
2797 Collect: runtime.BlockProfile,
2798 SubStack: `sync.(*Mutex).Lock
2799 runtime/pprof.inlineC
2800 runtime/pprof.inlineB
2801 runtime/pprof.inlineA`,
2802 },
2803 }
2804
2805 for _, tc := range tcs {
2806 t.Run(tc.Name, func(t *testing.T) {
2807 stacks := getProfileStacks(tc.Collect, false)
2808 for _, s := range stacks {
2809 if strings.Contains(s, tc.SubStack) {
2810 return
2811 }
2812 }
2813 t.Error("did not see expected stack")
2814 t.Logf("wanted:\n%s", tc.SubStack)
2815 t.Logf("got: %s", stacks)
2816 })
2817 }
2818 }
2819
// TestProfileRecordNullPadding verifies that every record-collecting profile
// API null-pads the unused tail of each record's Stack0 array.
func TestProfileRecordNullPadding(t *testing.T) {
	// Record every event, and restore the old rates at test end.
	t.Cleanup(disableSampling())
	// Produce at least one event per profile type before collecting:
	memSink = make([]byte, 1)      // heap allocation for the memory profile
	<-time.After(time.Millisecond) // channel receive for the block profile
	blockMutex(t)                  // mutex contention for the mutex profile
	runtime.GC()
	// NOTE(review): the GC presumably flushes the allocation above into the
	// memory profile before the MemProfile calls below — confirm.

	testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile)
	testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile)
	testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile)
	testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) {
		return runtime.MemProfile(p, true)
	})
	testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) {
		return runtime.MemProfile(p, false)
	})

}
2840
2841 func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {
2842 stack0 := func(sr *T) *[32]uintptr {
2843 switch t := any(sr).(type) {
2844 case *runtime.StackRecord:
2845 return &t.Stack0
2846 case *runtime.MemProfileRecord:
2847 return &t.Stack0
2848 case *runtime.BlockProfileRecord:
2849 return &t.Stack0
2850 default:
2851 panic(fmt.Sprintf("unexpected type %T", sr))
2852 }
2853 }
2854
2855 t.Run(name, func(t *testing.T) {
2856 var p []T
2857 for {
2858 n, ok := fn(p)
2859 if ok {
2860 p = p[:n]
2861 break
2862 }
2863 p = make([]T, n*2)
2864 for i := range p {
2865 s0 := stack0(&p[i])
2866 for j := range s0 {
2867
2868 s0[j] = ^uintptr(0)
2869 }
2870 }
2871 }
2872
2873 if len(p) == 0 {
2874 t.Fatal("no records found")
2875 }
2876
2877 for _, sr := range p {
2878 for i, v := range stack0(&sr) {
2879 if v == ^uintptr(0) {
2880 t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr)
2881 }
2882 }
2883 }
2884 })
2885 }
2886
2887
2888
2889 func disableSampling() func() {
2890 oldMemRate := runtime.MemProfileRate
2891 runtime.MemProfileRate = 1
2892 runtime.SetBlockProfileRate(1)
2893 oldMutexRate := runtime.SetMutexProfileFraction(1)
2894 return func() {
2895 runtime.MemProfileRate = oldMemRate
2896 runtime.SetBlockProfileRate(0)
2897 runtime.SetMutexProfileFraction(oldMutexRate)
2898 }
2899 }
2900
View as plain text