1
2
3
4
5
6
7 package pprof
8
9 import (
10 "bytes"
11 "context"
12 "fmt"
13 "internal/abi"
14 "internal/profile"
15 "internal/syscall/unix"
16 "internal/testenv"
17 "io"
18 "iter"
19 "math"
20 "math/big"
21 "os"
22 "regexp"
23 "runtime"
24 "runtime/debug"
25 "slices"
26 "strconv"
27 "strings"
28 "sync"
29 "sync/atomic"
30 "testing"
31 "time"
32 _ "unsafe"
33 )
34
35 func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
36
37
38
39
40
41 t0 := time.Now()
42 accum := *y
43 for i := 0; i < 500 || time.Since(t0) < dur; i++ {
44 accum = f(accum)
45 }
46 *y = accum
47 }
48
49 var (
50 salt1 = 0
51 salt2 = 0
52 )
53
54
55
56
57 func cpuHog1(x int) int {
58 return cpuHog0(x, 1e5)
59 }
60
61 func cpuHog0(x, n int) int {
62 foo := x
63 for i := 0; i < n; i++ {
64 if foo > 0 {
65 foo *= foo
66 } else {
67 foo *= foo + 1
68 }
69 }
70 return foo
71 }
72
73 func cpuHog2(x int) int {
74 foo := x
75 for i := 0; i < 1e5; i++ {
76 if foo > 0 {
77 foo *= foo
78 } else {
79 foo *= foo + 2
80 }
81 }
82 return foo
83 }
84
85
86
87 func avoidFunctions() []string {
88 if runtime.Compiler == "gccgo" {
89 return []string{"runtime.sigprof"}
90 }
91 return nil
92 }
93
94 func TestCPUProfile(t *testing.T) {
95 matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
96 testCPUProfile(t, matches, func(dur time.Duration) {
97 cpuHogger(cpuHog1, &salt1, dur)
98 })
99 }
100
101 func TestCPUProfileMultithreaded(t *testing.T) {
102 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
103 matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions())
104 testCPUProfile(t, matches, func(dur time.Duration) {
105 c := make(chan int)
106 go func() {
107 cpuHogger(cpuHog1, &salt1, dur)
108 c <- 1
109 }()
110 cpuHogger(cpuHog2, &salt2, dur)
111 <-c
112 })
113 }
114
115 func TestCPUProfileMultithreadMagnitude(t *testing.T) {
116 if runtime.GOOS != "linux" {
117 t.Skip("issue 35057 is only confirmed on Linux")
118 }
119
120
121
122 major, minor := unix.KernelVersion()
123 t.Logf("Running on Linux %d.%d", major, minor)
124 defer func() {
125 if t.Failed() {
126 t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
127 }
128 }()
129
130
131
132
133 if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
134 have59 := major > 5 || (major == 5 && minor >= 9)
135 have516 := major > 5 || (major == 5 && minor >= 16)
136 if have59 && !have516 {
137 testenv.SkipFlaky(t, 49065)
138 }
139 }
140
141
142
143
144
145
146
147
148
149
150
151
152 maxDiff := 0.10
153 if testing.Short() {
154 maxDiff = 0.40
155 }
156
157 compare := func(a, b time.Duration, maxDiff float64) error {
158 if a <= 0 || b <= 0 {
159 return fmt.Errorf("Expected both time reports to be positive")
160 }
161
162 if a < b {
163 a, b = b, a
164 }
165
166 diff := float64(a-b) / float64(a)
167 if diff > maxDiff {
168 return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
169 }
170
171 return nil
172 }
173
174 for _, tc := range []struct {
175 name string
176 workers int
177 }{
178 {
179 name: "serial",
180 workers: 1,
181 },
182 {
183 name: "parallel",
184 workers: runtime.GOMAXPROCS(0),
185 },
186 } {
187
188 t.Run(tc.name, func(t *testing.T) {
189 t.Logf("Running with %d workers", tc.workers)
190
191 var userTime, systemTime time.Duration
192 matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
193 acceptProfile := func(t *testing.T, p *profile.Profile) bool {
194 if !matches(t, p) {
195 return false
196 }
197
198 ok := true
199 for i, unit := range []string{"count", "nanoseconds"} {
200 if have, want := p.SampleType[i].Unit, unit; have != want {
201 t.Logf("pN SampleType[%d]; %q != %q", i, have, want)
202 ok = false
203 }
204 }
205
206
207
208
209
210
211 var value time.Duration
212 for _, sample := range p.Sample {
213 value += time.Duration(sample.Value[1]) * time.Nanosecond
214 }
215
216 totalTime := userTime + systemTime
217 t.Logf("compare %s user + %s system = %s vs %s", userTime, systemTime, totalTime, value)
218 if err := compare(totalTime, value, maxDiff); err != nil {
219 t.Logf("compare got %v want nil", err)
220 ok = false
221 }
222
223 return ok
224 }
225
226 testCPUProfile(t, acceptProfile, func(dur time.Duration) {
227 userTime, systemTime = diffCPUTime(t, func() {
228 var wg sync.WaitGroup
229 var once sync.Once
230 for i := 0; i < tc.workers; i++ {
231 wg.Add(1)
232 go func() {
233 defer wg.Done()
234 var salt = 0
235 cpuHogger(cpuHog1, &salt, dur)
236 once.Do(func() { salt1 = salt })
237 }()
238 }
239 wg.Wait()
240 })
241 })
242 })
243 }
244 }
245
246
247
248 func containsInlinedCall(f any, maxBytes int) bool {
249 _, found := findInlinedCall(f, maxBytes)
250 return found
251 }
252
253
254
255 func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
256 fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
257 if fFunc == nil || fFunc.Entry() == 0 {
258 panic("failed to locate function entry")
259 }
260
261 for offset := 0; offset < maxBytes; offset++ {
262 innerPC := fFunc.Entry() + uintptr(offset)
263 inner := runtime.FuncForPC(innerPC)
264 if inner == nil {
265
266
267 continue
268 }
269 if inner.Entry() != fFunc.Entry() {
270
271 break
272 }
273 if inner.Name() != fFunc.Name() {
274
275
276 return uint64(innerPC), true
277 }
278 }
279
280 return 0, false
281 }
282
283 func TestCPUProfileInlining(t *testing.T) {
284 if !containsInlinedCall(inlinedCaller, 4<<10) {
285 t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
286 }
287
288 matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions())
289 p := testCPUProfile(t, matches, func(dur time.Duration) {
290 cpuHogger(inlinedCaller, &salt1, dur)
291 })
292
293
294 for _, loc := range p.Location {
295 hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
296 for _, line := range loc.Line {
297 if line.Function.Name == "runtime/pprof.inlinedCallee" {
298 hasInlinedCallee = true
299 }
300 if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
301 hasInlinedCallerAfterInlinedCallee = true
302 }
303 }
304 if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
305 t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
306 }
307 }
308 }
309
310 func inlinedCaller(x int) int {
311 x = inlinedCallee(x, 1e5)
312 return x
313 }
314
315 func inlinedCallee(x, n int) int {
316 return cpuHog0(x, n)
317 }
318
319
320 func dumpCallers(pcs []uintptr) {
321 if pcs == nil {
322 return
323 }
324
325 skip := 2
326 runtime.Callers(skip, pcs)
327 }
328
329
330 func inlinedCallerDump(pcs []uintptr) {
331 inlinedCalleeDump(pcs)
332 }
333
334 func inlinedCalleeDump(pcs []uintptr) {
335 dumpCallers(pcs)
336 }
337
338 type inlineWrapperInterface interface {
339 dump(stack []uintptr)
340 }
341
342 type inlineWrapper struct {
343 }
344
345 func (h inlineWrapper) dump(pcs []uintptr) {
346 dumpCallers(pcs)
347 }
348
349 func inlinedWrapperCallerDump(pcs []uintptr) {
350 var h inlineWrapperInterface
351 h = &inlineWrapper{}
352 h.dump(pcs)
353 }
354
355 func TestCPUProfileRecursion(t *testing.T) {
356 matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
357 p := testCPUProfile(t, matches, func(dur time.Duration) {
358 cpuHogger(recursionCaller, &salt1, dur)
359 })
360
361
362 for i, loc := range p.Location {
363 recursionFunc := 0
364 for _, line := range loc.Line {
365 if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
366 recursionFunc++
367 }
368 }
369 if recursionFunc > 1 {
370 t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
371 }
372 }
373 }
374
375 func recursionCaller(x int) int {
376 y := recursionCallee(3, x)
377 return y
378 }
379
380 func recursionCallee(n, x int) int {
381 if n == 0 {
382 return 1
383 }
384 y := inlinedCallee(x, 1e4)
385 return y * recursionCallee(n-1, x)
386 }
387
388 func recursionChainTop(x int, pcs []uintptr) {
389 if x < 0 {
390 return
391 }
392 recursionChainMiddle(x, pcs)
393 }
394
395 func recursionChainMiddle(x int, pcs []uintptr) {
396 recursionChainBottom(x, pcs)
397 }
398
399 func recursionChainBottom(x int, pcs []uintptr) {
400
401
402 dumpCallers(pcs)
403
404 recursionChainTop(x-1, pcs)
405 }
406
407 func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
408 p, err := profile.Parse(bytes.NewReader(valBytes))
409 if err != nil {
410 t.Fatal(err)
411 }
412 for _, sample := range p.Sample {
413 count := uintptr(sample.Value[0])
414 f(count, sample.Location, sample.Label)
415 }
416 return p
417 }
418
419 func cpuProfilingBroken() bool {
420 switch runtime.GOOS {
421 case "plan9":
422
423 return true
424 case "aix":
425
426 return true
427 case "ios", "dragonfly", "netbsd", "illumos", "solaris":
428
429 return true
430 case "openbsd":
431 if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
432
433 return true
434 }
435 }
436
437 return false
438 }
439
440
441
442 func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
443 switch runtime.GOOS {
444 case "darwin":
445 out, err := testenv.Command(t, "uname", "-a").CombinedOutput()
446 if err != nil {
447 t.Fatal(err)
448 }
449 vers := string(out)
450 t.Logf("uname -a: %v", vers)
451 case "plan9":
452 t.Skip("skipping on plan9")
453 case "wasip1":
454 t.Skip("skipping on wasip1")
455 }
456
457 broken := cpuProfilingBroken()
458
459 deadline, ok := t.Deadline()
460 if broken || !ok {
461 if broken && testing.Short() {
462
463 deadline = time.Now().Add(1 * time.Second)
464 } else {
465 deadline = time.Now().Add(10 * time.Second)
466 }
467 }
468
469
470
471 duration := 5 * time.Second
472 if testing.Short() {
473 duration = 100 * time.Millisecond
474 }
475
476
477
478
479
480
481 for {
482 var prof bytes.Buffer
483 if err := StartCPUProfile(&prof); err != nil {
484 t.Fatal(err)
485 }
486 f(duration)
487 StopCPUProfile()
488
489 if p, ok := profileOk(t, matches, prof, duration); ok {
490 return p
491 }
492
493 duration *= 2
494 if time.Until(deadline) < duration {
495 break
496 }
497 t.Logf("retrying with %s duration", duration)
498 }
499
500 if broken {
501 t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
502 }
503
504
505
506
507
508 if os.Getenv("IN_QEMU") == "1" {
509 t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
510 }
511 t.FailNow()
512 return nil
513 }
514
515 var diffCPUTimeImpl func(f func()) (user, system time.Duration)
516
517 func diffCPUTime(t *testing.T, f func()) (user, system time.Duration) {
518 if fn := diffCPUTimeImpl; fn != nil {
519 return fn(f)
520 }
521 t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
522 return 0, 0
523 }
524
525
526 func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
527 for _, loc := range stk {
528 for _, line := range loc.Line {
529 if strings.Contains(line.Function.Name, spec) {
530 return true
531 }
532 }
533 }
534 return false
535 }
536
537 type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
538
539 func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
540 ok = true
541
542 var samples uintptr
543 var buf strings.Builder
544 p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
545 fmt.Fprintf(&buf, "%d:", count)
546 fprintStack(&buf, stk)
547 fmt.Fprintf(&buf, " labels: %v\n", labels)
548 samples += count
549 fmt.Fprintf(&buf, "\n")
550 })
551 t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
552
553 if samples < 10 && runtime.GOOS == "windows" {
554
555
556
557 t.Log("too few samples on Windows (golang.org/issue/10842)")
558 return p, false
559 }
560
561
562
563
564
565
566 if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
567 t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
568 ok = false
569 }
570
571 if matches != nil && !matches(t, p) {
572 ok = false
573 }
574
575 return p, ok
576 }
577
578 type profileMatchFunc func(*testing.T, *profile.Profile) bool
579
580 func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
581 return func(t *testing.T, p *profile.Profile) (ok bool) {
582 ok = true
583
584
585
586 have := make([]uintptr, len(need))
587 avoidSamples := make([]uintptr, len(avoid))
588
589 for _, sample := range p.Sample {
590 count := uintptr(sample.Value[0])
591 for i, spec := range need {
592 if matches(spec, count, sample.Location, sample.Label) {
593 have[i] += count
594 }
595 }
596 for i, name := range avoid {
597 for _, loc := range sample.Location {
598 for _, line := range loc.Line {
599 if strings.Contains(line.Function.Name, name) {
600 avoidSamples[i] += count
601 }
602 }
603 }
604 }
605 }
606
607 for i, name := range avoid {
608 bad := avoidSamples[i]
609 if bad != 0 {
610 t.Logf("found %d samples in avoid-function %s\n", bad, name)
611 ok = false
612 }
613 }
614
615 if len(need) == 0 {
616 return
617 }
618
619 var total uintptr
620 for i, name := range need {
621 total += have[i]
622 t.Logf("found %d samples in expected function %s\n", have[i], name)
623 }
624 if total == 0 {
625 t.Logf("no samples in expected functions")
626 ok = false
627 }
628
629
630
631
632
633 min := uintptr(1)
634 for i, name := range need {
635 if have[i] < min {
636 t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
637 ok = false
638 }
639 }
640 return
641 }
642 }
643
644
645
646 func TestCPUProfileWithFork(t *testing.T) {
647 testenv.MustHaveExec(t)
648
649 exe, err := os.Executable()
650 if err != nil {
651 t.Fatal(err)
652 }
653
654 heap := 1 << 30
655 if runtime.GOOS == "android" {
656
657 heap = 100 << 20
658 }
659 if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
660
661 heap = 100 << 20
662 }
663 if testing.Short() {
664 heap = 100 << 20
665 }
666
667 garbage := make([]byte, heap)
668
669 done := make(chan bool)
670 go func() {
671 for i := range garbage {
672 garbage[i] = 42
673 }
674 done <- true
675 }()
676 <-done
677
678 var prof bytes.Buffer
679 if err := StartCPUProfile(&prof); err != nil {
680 t.Fatal(err)
681 }
682 defer StopCPUProfile()
683
684 for i := 0; i < 10; i++ {
685 testenv.Command(t, exe, "-h").CombinedOutput()
686 }
687 }
688
689
690
691
692 func TestGoroutineSwitch(t *testing.T) {
693 if runtime.Compiler == "gccgo" {
694 t.Skip("not applicable for gccgo")
695 }
696
697
698
699 tries := 10
700 count := 1000000
701 if testing.Short() {
702 tries = 1
703 }
704 for try := 0; try < tries; try++ {
705 var prof bytes.Buffer
706 if err := StartCPUProfile(&prof); err != nil {
707 t.Fatal(err)
708 }
709 for i := 0; i < count; i++ {
710 runtime.Gosched()
711 }
712 StopCPUProfile()
713
714
715
716
717
718 parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
719
720
721 if len(stk) == 2 {
722 name := stk[1].Line[0].Function.Name
723 if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
724 return
725 }
726 }
727
728
729
730 if len(stk) == 1 {
731 return
732 }
733
734
735
736 name := stk[0].Line[0].Function.Name
737 if name == "gogo" {
738 var buf strings.Builder
739 fprintStack(&buf, stk)
740 t.Fatalf("found profile entry for gogo:\n%s", buf.String())
741 }
742 })
743 }
744 }
745
746 func fprintStack(w io.Writer, stk []*profile.Location) {
747 if len(stk) == 0 {
748 fmt.Fprintf(w, " (stack empty)")
749 }
750 for _, loc := range stk {
751 fmt.Fprintf(w, " %#x", loc.Address)
752 fmt.Fprintf(w, " (")
753 for i, line := range loc.Line {
754 if i > 0 {
755 fmt.Fprintf(w, " ")
756 }
757 fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
758 }
759 fmt.Fprintf(w, ")")
760 }
761 }
762
763
764 func TestMathBigDivide(t *testing.T) {
765 testCPUProfile(t, nil, func(duration time.Duration) {
766 t := time.After(duration)
767 pi := new(big.Int)
768 for {
769 for i := 0; i < 100; i++ {
770 n := big.NewInt(2646693125139304345)
771 d := big.NewInt(842468587426513207)
772 pi.Div(n, d)
773 }
774 select {
775 case <-t:
776 return
777 default:
778 }
779 }
780 })
781 }
782
783
784 func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
785 for _, f := range strings.Split(spec, ",") {
786 if !stackContains(f, count, stk, labels) {
787 return false
788 }
789 }
790 return true
791 }
792
793 func TestMorestack(t *testing.T) {
794 matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
795 testCPUProfile(t, matches, func(duration time.Duration) {
796 t := time.After(duration)
797 c := make(chan bool)
798 for {
799 go func() {
800 growstack1()
801 c <- true
802 }()
803 select {
804 case <-t:
805 return
806 case <-c:
807 }
808 }
809 })
810 }
811
812
813 func growstack1() {
814 growstack(10)
815 }
816
817
818 func growstack(n int) {
819 var buf [8 << 18]byte
820 use(buf)
821 if n > 0 {
822 growstack(n - 1)
823 }
824 }
825
826
827 func use(x [8 << 18]byte) {}
828
829 func TestBlockProfile(t *testing.T) {
830 type TestCase struct {
831 name string
832 f func(*testing.T)
833 stk []string
834 re string
835 }
836 tests := [...]TestCase{
837 {
838 name: "chan recv",
839 f: blockChanRecv,
840 stk: []string{
841 "runtime.chanrecv1",
842 "runtime/pprof.blockChanRecv",
843 "runtime/pprof.TestBlockProfile",
844 },
845 re: `
846 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
847 # 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*runtime/chan.go:[0-9]+
848 # 0x[0-9a-f]+ runtime/pprof\.blockChanRecv\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
849 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
850 `},
851 {
852 name: "chan send",
853 f: blockChanSend,
854 stk: []string{
855 "runtime.chansend1",
856 "runtime/pprof.blockChanSend",
857 "runtime/pprof.TestBlockProfile",
858 },
859 re: `
860 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
861 # 0x[0-9a-f]+ runtime\.chansend1\+0x[0-9a-f]+ .*runtime/chan.go:[0-9]+
862 # 0x[0-9a-f]+ runtime/pprof\.blockChanSend\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
863 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
864 `},
865 {
866 name: "chan close",
867 f: blockChanClose,
868 stk: []string{
869 "runtime.chanrecv1",
870 "runtime/pprof.blockChanClose",
871 "runtime/pprof.TestBlockProfile",
872 },
873 re: `
874 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
875 # 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*runtime/chan.go:[0-9]+
876 # 0x[0-9a-f]+ runtime/pprof\.blockChanClose\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
877 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
878 `},
879 {
880 name: "select recv async",
881 f: blockSelectRecvAsync,
882 stk: []string{
883 "runtime.selectgo",
884 "runtime/pprof.blockSelectRecvAsync",
885 "runtime/pprof.TestBlockProfile",
886 },
887 re: `
888 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
889 # 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*runtime/select.go:[0-9]+
890 # 0x[0-9a-f]+ runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
891 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
892 `},
893 {
894 name: "select send sync",
895 f: blockSelectSendSync,
896 stk: []string{
897 "runtime.selectgo",
898 "runtime/pprof.blockSelectSendSync",
899 "runtime/pprof.TestBlockProfile",
900 },
901 re: `
902 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
903 # 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*runtime/select.go:[0-9]+
904 # 0x[0-9a-f]+ runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
905 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
906 `},
907 {
908 name: "mutex",
909 f: blockMutex,
910 stk: []string{
911 "sync.(*Mutex).Lock",
912 "runtime/pprof.blockMutex",
913 "runtime/pprof.TestBlockProfile",
914 },
915 re: `
916 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
917 # 0x[0-9a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+ .*sync/mutex\.go:[0-9]+
918 # 0x[0-9a-f]+ runtime/pprof\.blockMutex\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
919 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
920 `},
921 {
922 name: "cond",
923 f: blockCond,
924 stk: []string{
925 "sync.(*Cond).Wait",
926 "runtime/pprof.blockCond",
927 "runtime/pprof.TestBlockProfile",
928 },
929 re: `
930 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
931 # 0x[0-9a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+ .*sync/cond\.go:[0-9]+
932 # 0x[0-9a-f]+ runtime/pprof\.blockCond\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
933 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*runtime/pprof/pprof_test.go:[0-9]+
934 `},
935 }
936
937
938 runtime.SetBlockProfileRate(1)
939 defer runtime.SetBlockProfileRate(0)
940 for _, test := range tests {
941 test.f(t)
942 }
943
944 t.Run("debug=1", func(t *testing.T) {
945 var w strings.Builder
946 Lookup("block").WriteTo(&w, 1)
947 prof := w.String()
948
949 if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
950 t.Fatalf("Bad profile header:\n%v", prof)
951 }
952
953 if strings.HasSuffix(prof, "#\t0x0\n\n") {
954 t.Errorf("Useless 0 suffix:\n%v", prof)
955 }
956
957 for _, test := range tests {
958 if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
959 t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
960 }
961 }
962 })
963
964 t.Run("proto", func(t *testing.T) {
965
966 var w bytes.Buffer
967 Lookup("block").WriteTo(&w, 0)
968 p, err := profile.Parse(&w)
969 if err != nil {
970 t.Fatalf("failed to parse profile: %v", err)
971 }
972 t.Logf("parsed proto: %s", p)
973 if err := p.CheckValid(); err != nil {
974 t.Fatalf("invalid profile: %v", err)
975 }
976
977 stks := profileStacks(p)
978 for _, test := range tests {
979 if !containsStack(stks, test.stk) {
980 t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
981 }
982 }
983 })
984
985 }
986
987 func profileStacks(p *profile.Profile) (res [][]string) {
988 for _, s := range p.Sample {
989 var stk []string
990 for _, l := range s.Location {
991 for _, line := range l.Line {
992 stk = append(stk, line.Function.Name)
993 }
994 }
995 res = append(res, stk)
996 }
997 return res
998 }
999
1000 func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) {
1001 for _, record := range records {
1002 frames := runtime.CallersFrames(record.Stack())
1003 var stk []string
1004 for {
1005 frame, more := frames.Next()
1006 stk = append(stk, frame.Function)
1007 if !more {
1008 break
1009 }
1010 }
1011 res = append(res, stk)
1012 }
1013 return res
1014 }
1015
1016 func containsStack(got [][]string, want []string) bool {
1017 for _, stk := range got {
1018 if len(stk) < len(want) {
1019 continue
1020 }
1021 for i, f := range want {
1022 if f != stk[i] {
1023 break
1024 }
1025 if i == len(want)-1 {
1026 return true
1027 }
1028 }
1029 }
1030 return false
1031 }
1032
1033
1034
1035
1036 func awaitBlockedGoroutine(t *testing.T, state, fName string, count int) {
1037 re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
1038 r := regexp.MustCompile(re)
1039
1040 if deadline, ok := t.Deadline(); ok {
1041 if d := time.Until(deadline); d > 1*time.Second {
1042 timer := time.AfterFunc(d-1*time.Second, func() {
1043 debug.SetTraceback("all")
1044 panic(fmt.Sprintf("timed out waiting for %#q", re))
1045 })
1046 defer timer.Stop()
1047 }
1048 }
1049
1050 buf := make([]byte, 64<<10)
1051 for {
1052 runtime.Gosched()
1053 n := runtime.Stack(buf, true)
1054 if n == len(buf) {
1055
1056
1057 buf = make([]byte, 2*len(buf))
1058 continue
1059 }
1060 if len(r.FindAll(buf[:n], -1)) >= count {
1061 return
1062 }
1063 }
1064 }
1065
1066 func blockChanRecv(t *testing.T) {
1067 c := make(chan bool)
1068 go func() {
1069 awaitBlockedGoroutine(t, "chan receive", "blockChanRecv", 1)
1070 c <- true
1071 }()
1072 <-c
1073 }
1074
1075 func blockChanSend(t *testing.T) {
1076 c := make(chan bool)
1077 go func() {
1078 awaitBlockedGoroutine(t, "chan send", "blockChanSend", 1)
1079 <-c
1080 }()
1081 c <- true
1082 }
1083
1084 func blockChanClose(t *testing.T) {
1085 c := make(chan bool)
1086 go func() {
1087 awaitBlockedGoroutine(t, "chan receive", "blockChanClose", 1)
1088 close(c)
1089 }()
1090 <-c
1091 }
1092
1093 func blockSelectRecvAsync(t *testing.T) {
1094 const numTries = 3
1095 c := make(chan bool, 1)
1096 c2 := make(chan bool, 1)
1097 go func() {
1098 for i := 0; i < numTries; i++ {
1099 awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync", 1)
1100 c <- true
1101 }
1102 }()
1103 for i := 0; i < numTries; i++ {
1104 select {
1105 case <-c:
1106 case <-c2:
1107 }
1108 }
1109 }
1110
1111 func blockSelectSendSync(t *testing.T) {
1112 c := make(chan bool)
1113 c2 := make(chan bool)
1114 go func() {
1115 awaitBlockedGoroutine(t, "select", "blockSelectSendSync", 1)
1116 <-c
1117 }()
1118 select {
1119 case c <- true:
1120 case c2 <- true:
1121 }
1122 }
1123
1124 func blockMutex(t *testing.T) {
1125 var mu sync.Mutex
1126 mu.Lock()
1127 go func() {
1128 awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", 1)
1129 mu.Unlock()
1130 }()
1131
1132
1133
1134
1135 mu.Lock()
1136 }
1137
1138 func blockMutexN(t *testing.T, n int, d time.Duration) {
1139 var wg sync.WaitGroup
1140 var mu sync.Mutex
1141 mu.Lock()
1142 go func() {
1143 awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", n)
1144 time.Sleep(d)
1145 mu.Unlock()
1146 }()
1147
1148
1149
1150
1151 for i := 0; i < n; i++ {
1152 wg.Add(1)
1153 go func() {
1154 defer wg.Done()
1155 mu.Lock()
1156 mu.Unlock()
1157 }()
1158 }
1159 wg.Wait()
1160 }
1161
1162 func blockCond(t *testing.T) {
1163 var mu sync.Mutex
1164 c := sync.NewCond(&mu)
1165 mu.Lock()
1166 go func() {
1167 awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond", 1)
1168 mu.Lock()
1169 c.Signal()
1170 mu.Unlock()
1171 }()
1172 c.Wait()
1173 mu.Unlock()
1174 }
1175
1176
1177 func TestBlockProfileBias(t *testing.T) {
1178 rate := int(1000)
1179 runtime.SetBlockProfileRate(rate)
1180 defer runtime.SetBlockProfileRate(0)
1181
1182
1183 blockFrequentShort(rate)
1184 blockInfrequentLong(rate)
1185
1186 var w bytes.Buffer
1187 Lookup("block").WriteTo(&w, 0)
1188 p, err := profile.Parse(&w)
1189 if err != nil {
1190 t.Fatalf("failed to parse profile: %v", err)
1191 }
1192 t.Logf("parsed proto: %s", p)
1193
1194 il := float64(-1)
1195 fs := float64(-1)
1196 for _, s := range p.Sample {
1197 for _, l := range s.Location {
1198 for _, line := range l.Line {
1199 if len(s.Value) < 2 {
1200 t.Fatal("block profile has less than 2 sample types")
1201 }
1202
1203 if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
1204 il = float64(s.Value[1])
1205 } else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
1206 fs = float64(s.Value[1])
1207 }
1208 }
1209 }
1210 }
1211 if il == -1 || fs == -1 {
1212 t.Fatal("block profile is missing expected functions")
1213 }
1214
1215
1216 const threshold = 0.2
1217 if bias := (il - fs) / il; math.Abs(bias) > threshold {
1218 t.Fatalf("bias: abs(%f) > %f", bias, threshold)
1219 } else {
1220 t.Logf("bias: abs(%f) < %f", bias, threshold)
1221 }
1222 }
1223
1224
1225
1226 func blockFrequentShort(rate int) {
1227 for i := 0; i < 100000; i++ {
1228 blockevent(int64(rate/10), 1)
1229 }
1230 }
1231
1232
1233
1234 func blockInfrequentLong(rate int) {
1235 for i := 0; i < 10000; i++ {
1236 blockevent(int64(rate), 1)
1237 }
1238 }
1239
1240
1241
1242
1243 func blockevent(cycles int64, skip int)
1244
1245 func TestMutexProfile(t *testing.T) {
1246
1247
1248 old := runtime.SetMutexProfileFraction(1)
1249 defer runtime.SetMutexProfileFraction(old)
1250 if old != 0 {
1251 t.Fatalf("need MutexProfileRate 0, got %d", old)
1252 }
1253
1254 const (
1255 N = 100
1256 D = 100 * time.Millisecond
1257 )
1258 start := time.Now()
1259 blockMutexN(t, N, D)
1260 blockMutexNTime := time.Since(start)
1261
1262 t.Run("debug=1", func(t *testing.T) {
1263 var w strings.Builder
1264 Lookup("mutex").WriteTo(&w, 1)
1265 prof := w.String()
1266 t.Logf("received profile: %v", prof)
1267
1268 if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
1269 t.Errorf("Bad profile header:\n%v", prof)
1270 }
1271 prof = strings.Trim(prof, "\n")
1272 lines := strings.Split(prof, "\n")
1273 if len(lines) < 6 {
1274 t.Fatalf("expected >=6 lines, got %d %q\n%s", len(lines), prof, prof)
1275 }
1276
1277 r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
1278 if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
1279 t.Errorf("%q didn't match %q", lines[3], r2)
1280 }
1281 r3 := "^#.*runtime/pprof.blockMutex.*$"
1282 if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
1283 t.Errorf("%q didn't match %q", lines[5], r3)
1284 }
1285 t.Log(prof)
1286 })
1287 t.Run("proto", func(t *testing.T) {
1288
1289 var w bytes.Buffer
1290 Lookup("mutex").WriteTo(&w, 0)
1291 p, err := profile.Parse(&w)
1292 if err != nil {
1293 t.Fatalf("failed to parse profile: %v", err)
1294 }
1295 t.Logf("parsed proto: %s", p)
1296 if err := p.CheckValid(); err != nil {
1297 t.Fatalf("invalid profile: %v", err)
1298 }
1299
1300 stks := profileStacks(p)
1301 for _, want := range [][]string{
1302 {"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
1303 } {
1304 if !containsStack(stks, want) {
1305 t.Errorf("No matching stack entry for %+v", want)
1306 }
1307 }
1308
1309 i := 0
1310 for ; i < len(p.SampleType); i++ {
1311 if p.SampleType[i].Unit == "nanoseconds" {
1312 break
1313 }
1314 }
1315 if i >= len(p.SampleType) {
1316 t.Fatalf("profile did not contain nanoseconds sample")
1317 }
1318 total := int64(0)
1319 for _, s := range p.Sample {
1320 total += s.Value[i]
1321 }
1322
1323
1324
1325
1326
1327
1328
1329
1330 d := time.Duration(total)
1331 lo := time.Duration(N * D * 9 / 10)
1332 hi := time.Duration(N) * blockMutexNTime * 11 / 10
1333 if d < lo || d > hi {
1334 for _, s := range p.Sample {
1335 t.Logf("sample: %s", time.Duration(s.Value[i]))
1336 }
1337 t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
1338 }
1339 })
1340
1341 t.Run("records", func(t *testing.T) {
1342
1343 var records []runtime.BlockProfileRecord
1344 for {
1345 n, ok := runtime.MutexProfile(records)
1346 if ok {
1347 records = records[:n]
1348 break
1349 }
1350 records = make([]runtime.BlockProfileRecord, n*2)
1351 }
1352
1353
1354
1355
1356 stks := blockRecordStacks(records)
1357 want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"}
1358 if !containsStack(stks, want) {
1359 t.Errorf("No matching stack entry for %+v", want)
1360 }
1361 })
1362 }
1363
1364 func TestMutexProfileRateAdjust(t *testing.T) {
1365 old := runtime.SetMutexProfileFraction(1)
1366 defer runtime.SetMutexProfileFraction(old)
1367 if old != 0 {
1368 t.Fatalf("need MutexProfileRate 0, got %d", old)
1369 }
1370
1371 readProfile := func() (contentions int64, delay int64) {
1372 var w bytes.Buffer
1373 Lookup("mutex").WriteTo(&w, 0)
1374 p, err := profile.Parse(&w)
1375 if err != nil {
1376 t.Fatalf("failed to parse profile: %v", err)
1377 }
1378 t.Logf("parsed proto: %s", p)
1379 if err := p.CheckValid(); err != nil {
1380 t.Fatalf("invalid profile: %v", err)
1381 }
1382
1383 for _, s := range p.Sample {
1384 var match, runtimeInternal bool
1385 for _, l := range s.Location {
1386 for _, line := range l.Line {
1387 if line.Function.Name == "runtime/pprof.blockMutex.func1" {
1388 match = true
1389 }
1390 if line.Function.Name == "runtime.unlock" {
1391 runtimeInternal = true
1392 }
1393 }
1394 }
1395 if match && !runtimeInternal {
1396 contentions += s.Value[0]
1397 delay += s.Value[1]
1398 }
1399 }
1400 return
1401 }
1402
1403 blockMutex(t)
1404 contentions, delay := readProfile()
1405 if contentions == 0 {
1406 t.Fatal("did not see expected function in profile")
1407 }
1408 runtime.SetMutexProfileFraction(0)
1409 newContentions, newDelay := readProfile()
1410 if newContentions != contentions || newDelay != delay {
1411 t.Fatalf("sample value changed: got [%d, %d], want [%d, %d]", newContentions, newDelay, contentions, delay)
1412 }
1413 }
1414
1415 func func1(c chan int) { <-c }
1416 func func2(c chan int) { <-c }
1417 func func3(c chan int) { <-c }
1418 func func4(c chan int) { <-c }
1419
1420 func TestGoroutineCounts(t *testing.T) {
1421
1422
1423 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
1424
1425 c := make(chan int)
1426 for i := 0; i < 100; i++ {
1427 switch {
1428 case i%10 == 0:
1429 go func1(c)
1430 case i%2 == 0:
1431 go func2(c)
1432 default:
1433 go func3(c)
1434 }
1435
1436 for j := 0; j < 5; j++ {
1437 runtime.Gosched()
1438 }
1439 }
1440 ctx := context.Background()
1441
1442
1443
1444 Do(ctx, Labels("label", "value"), func(context.Context) {
1445 for i := 0; i < 89; i++ {
1446 switch {
1447 case i%10 == 0:
1448 go func1(c)
1449 case i%2 == 0:
1450 go func2(c)
1451 default:
1452 go func3(c)
1453 }
1454
1455 for j := 0; j < 5; j++ {
1456 runtime.Gosched()
1457 }
1458 }
1459 })
1460
1461 SetGoroutineLabels(WithLabels(context.Background(), Labels("self-label", "self-value")))
1462 defer SetGoroutineLabels(context.Background())
1463
1464 garbage := new(*int)
1465 fingReady := make(chan struct{})
1466 runtime.SetFinalizer(garbage, func(v **int) {
1467 Do(context.Background(), Labels("fing-label", "fing-value"), func(ctx context.Context) {
1468 close(fingReady)
1469 <-c
1470 })
1471 })
1472 garbage = nil
1473 for i := 0; i < 2; i++ {
1474 runtime.GC()
1475 }
1476 <-fingReady
1477
1478 var w bytes.Buffer
1479 goroutineProf := Lookup("goroutine")
1480
1481
1482 goroutineProf.WriteTo(&w, 1)
1483 prof := w.String()
1484
1485 labels := labelMap{"label": "value"}
1486 labelStr := "\n# labels: " + labels.String()
1487 selfLabel := labelMap{"self-label": "self-value"}
1488 selfLabelStr := "\n# labels: " + selfLabel.String()
1489 fingLabel := labelMap{"fing-label": "fing-value"}
1490 fingLabelStr := "\n# labels: " + fingLabel.String()
1491 orderedPrefix := []string{
1492 "\n50 @ ",
1493 "\n44 @", labelStr,
1494 "\n40 @",
1495 "\n36 @", labelStr,
1496 "\n10 @",
1497 "\n9 @", labelStr,
1498 "\n1 @"}
1499 if !containsInOrder(prof, append(orderedPrefix, selfLabelStr)...) {
1500 t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
1501 }
1502 if !containsInOrder(prof, append(orderedPrefix, fingLabelStr)...) {
1503 t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
1504 }
1505
1506
1507 w.Reset()
1508 goroutineProf.WriteTo(&w, 0)
1509 p, err := profile.Parse(&w)
1510 if err != nil {
1511 t.Errorf("error parsing protobuf profile: %v", err)
1512 }
1513 if err := p.CheckValid(); err != nil {
1514 t.Errorf("protobuf profile is invalid: %v", err)
1515 }
1516 expectedLabels := map[int64]map[string]string{
1517 50: {},
1518 44: {"label": "value"},
1519 40: {},
1520 36: {"label": "value"},
1521 10: {},
1522 9: {"label": "value"},
1523 1: {"self-label": "self-value", "fing-label": "fing-value"},
1524 }
1525 if !containsCountsLabels(p, expectedLabels) {
1526 t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
1527 expectedLabels, p)
1528 }
1529
1530 close(c)
1531
1532 time.Sleep(10 * time.Millisecond)
1533 }
1534
1535 func containsInOrder(s string, all ...string) bool {
1536 for _, t := range all {
1537 var ok bool
1538 if _, s, ok = strings.Cut(s, t); !ok {
1539 return false
1540 }
1541 }
1542 return true
1543 }
1544
1545 func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
1546 m := make(map[int64]int)
1547 type nkey struct {
1548 count int64
1549 key, val string
1550 }
1551 n := make(map[nkey]int)
1552 for c, kv := range countLabels {
1553 m[c]++
1554 for k, v := range kv {
1555 n[nkey{
1556 count: c,
1557 key: k,
1558 val: v,
1559 }]++
1560
1561 }
1562 }
1563 for _, s := range prof.Sample {
1564
1565 if len(s.Value) != 1 {
1566 return false
1567 }
1568 m[s.Value[0]]--
1569 for k, vs := range s.Label {
1570 for _, v := range vs {
1571 n[nkey{
1572 count: s.Value[0],
1573 key: k,
1574 val: v,
1575 }]--
1576 }
1577 }
1578 }
1579 for _, n := range m {
1580 if n > 0 {
1581 return false
1582 }
1583 }
1584 for _, ncnt := range n {
1585 if ncnt != 0 {
1586 return false
1587 }
1588 }
1589 return true
1590 }
1591
1592 func TestGoroutineProfileConcurrency(t *testing.T) {
1593 testenv.MustHaveParallelism(t)
1594
1595 goroutineProf := Lookup("goroutine")
1596
1597 profilerCalls := func(s string) int {
1598 return strings.Count(s, "\truntime/pprof.runtime_goroutineProfileWithLabels+")
1599 }
1600
1601 includesFinalizer := func(s string) bool {
1602 return strings.Contains(s, "runtime.runfinq")
1603 }
1604
1605
1606
1607 t.Run("overlapping profile requests", func(t *testing.T) {
1608 ctx := context.Background()
1609 ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
1610 defer cancel()
1611
1612 var wg sync.WaitGroup
1613 for i := 0; i < 2; i++ {
1614 wg.Add(1)
1615 Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) {
1616 go func() {
1617 defer wg.Done()
1618 for ctx.Err() == nil {
1619 var w strings.Builder
1620 goroutineProf.WriteTo(&w, 1)
1621 prof := w.String()
1622 count := profilerCalls(prof)
1623 if count >= 2 {
1624 t.Logf("prof %d\n%s", count, prof)
1625 cancel()
1626 }
1627 }
1628 }()
1629 })
1630 }
1631 wg.Wait()
1632 })
1633
1634
1635
1636 t.Run("finalizer not present", func(t *testing.T) {
1637 var w strings.Builder
1638 goroutineProf.WriteTo(&w, 1)
1639 prof := w.String()
1640 if includesFinalizer(prof) {
1641 t.Errorf("profile includes finalizer (but finalizer should be marked as system):\n%s", prof)
1642 }
1643 })
1644
1645
1646 t.Run("finalizer present", func(t *testing.T) {
1647
1648
1649
1650 type T *byte
1651 obj := new(T)
1652 ch1, ch2 := make(chan int), make(chan int)
1653 defer close(ch2)
1654 runtime.SetFinalizer(obj, func(_ interface{}) {
1655 close(ch1)
1656 <-ch2
1657 })
1658 obj = nil
1659 for i := 10; i >= 0; i-- {
1660 select {
1661 case <-ch1:
1662 default:
1663 if i == 0 {
1664 t.Fatalf("finalizer did not run")
1665 }
1666 runtime.GC()
1667 }
1668 }
1669 var w strings.Builder
1670 goroutineProf.WriteTo(&w, 1)
1671 prof := w.String()
1672 if !includesFinalizer(prof) {
1673 t.Errorf("profile does not include finalizer (and it should be marked as user):\n%s", prof)
1674 }
1675 })
1676
1677
1678 testLaunches := func(t *testing.T) {
1679 var done sync.WaitGroup
1680 defer done.Wait()
1681
1682 ctx := context.Background()
1683 ctx, cancel := context.WithCancel(ctx)
1684 defer cancel()
1685
1686 ch := make(chan int)
1687 defer close(ch)
1688
1689 var ready sync.WaitGroup
1690
1691
1692
1693
1694 ready.Add(1)
1695 done.Add(1)
1696 go func() {
1697 defer done.Done()
1698 for i := 0; ctx.Err() == nil; i++ {
1699
1700
1701 SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-loop-i", fmt.Sprint(i))))
1702 done.Add(1)
1703 go func() {
1704 <-ch
1705 done.Done()
1706 }()
1707 for j := 0; j < i; j++ {
1708
1709
1710
1711
1712
1713
1714 runtime.Gosched()
1715 }
1716 if i == 0 {
1717 ready.Done()
1718 }
1719 }
1720 }()
1721
1722
1723
1724
1725 ready.Add(1)
1726 var churn func(i int)
1727 churn = func(i int) {
1728 SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-churn-i", fmt.Sprint(i))))
1729 if i == 0 {
1730 ready.Done()
1731 } else if i%16 == 0 {
1732
1733
1734 runtime.Gosched()
1735 }
1736 if ctx.Err() == nil {
1737 go churn(i + 1)
1738 }
1739 }
1740 go func() {
1741 churn(0)
1742 }()
1743
1744 ready.Wait()
1745
1746 var w [3]bytes.Buffer
1747 for i := range w {
1748 goroutineProf.WriteTo(&w[i], 0)
1749 }
1750 for i := range w {
1751 p, err := profile.Parse(bytes.NewReader(w[i].Bytes()))
1752 if err != nil {
1753 t.Errorf("error parsing protobuf profile: %v", err)
1754 }
1755
1756
1757
1758 counts := make(map[string]int)
1759 for _, s := range p.Sample {
1760 label := s.Label[t.Name()+"-loop-i"]
1761 if len(label) > 0 {
1762 counts[label[0]]++
1763 }
1764 }
1765 for j, max := 0, len(counts)-1; j <= max; j++ {
1766 n := counts[fmt.Sprint(j)]
1767 if n == 1 || (n == 2 && j == max) {
1768 continue
1769 }
1770 t.Errorf("profile #%d's goroutines with label loop-i:%d; %d != 1 (or 2 for the last entry, %d)",
1771 i+1, j, n, max)
1772 t.Logf("counts %v", counts)
1773 break
1774 }
1775 }
1776 }
1777
1778 runs := 100
1779 if testing.Short() {
1780 runs = 5
1781 }
1782 for i := 0; i < runs; i++ {
1783
1784 t.Run("goroutine launches", testLaunches)
1785 }
1786 }
1787
1788
1789 func TestGoroutineProfileCoro(t *testing.T) {
1790 testenv.MustHaveParallelism(t)
1791
1792 goroutineProf := Lookup("goroutine")
1793
1794
1795 iterFunc := func() {
1796 p, stop := iter.Pull2(
1797 func(yield func(int, int) bool) {
1798 for i := 0; i < 10000; i++ {
1799 if !yield(i, i) {
1800 return
1801 }
1802 }
1803 },
1804 )
1805 defer stop()
1806 for {
1807 _, _, ok := p()
1808 if !ok {
1809 break
1810 }
1811 }
1812 }
1813 var wg sync.WaitGroup
1814 done := make(chan struct{})
1815 wg.Add(1)
1816 go func() {
1817 defer wg.Done()
1818 for {
1819 iterFunc()
1820 select {
1821 case <-done:
1822 default:
1823 }
1824 }
1825 }()
1826
1827
1828
1829 goroutineProf.WriteTo(io.Discard, 1)
1830 }
1831
1832 func BenchmarkGoroutine(b *testing.B) {
1833 withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
1834 return func(b *testing.B) {
1835 c := make(chan int)
1836 var ready, done sync.WaitGroup
1837 defer func() {
1838 close(c)
1839 done.Wait()
1840 }()
1841
1842 for i := 0; i < n; i++ {
1843 ready.Add(1)
1844 done.Add(1)
1845 go func() {
1846 ready.Done()
1847 <-c
1848 done.Done()
1849 }()
1850 }
1851
1852 ready.Wait()
1853 for i := 0; i < 5; i++ {
1854 runtime.Gosched()
1855 }
1856
1857 fn(b)
1858 }
1859 }
1860
1861 withChurn := func(fn func(b *testing.B)) func(b *testing.B) {
1862 return func(b *testing.B) {
1863 ctx := context.Background()
1864 ctx, cancel := context.WithCancel(ctx)
1865 defer cancel()
1866
1867 var ready sync.WaitGroup
1868 ready.Add(1)
1869 var count int64
1870 var churn func(i int)
1871 churn = func(i int) {
1872 SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
1873 atomic.AddInt64(&count, 1)
1874 if i == 0 {
1875 ready.Done()
1876 }
1877 if ctx.Err() == nil {
1878 go churn(i + 1)
1879 }
1880 }
1881 go func() {
1882 churn(0)
1883 }()
1884 ready.Wait()
1885
1886 fn(b)
1887 b.ReportMetric(float64(atomic.LoadInt64(&count))/float64(b.N), "concurrent_launches/op")
1888 }
1889 }
1890
1891 benchWriteTo := func(b *testing.B) {
1892 goroutineProf := Lookup("goroutine")
1893 b.ResetTimer()
1894 for i := 0; i < b.N; i++ {
1895 goroutineProf.WriteTo(io.Discard, 0)
1896 }
1897 b.StopTimer()
1898 }
1899
1900 benchGoroutineProfile := func(b *testing.B) {
1901 p := make([]runtime.StackRecord, 10000)
1902 b.ResetTimer()
1903 for i := 0; i < b.N; i++ {
1904 runtime.GoroutineProfile(p)
1905 }
1906 b.StopTimer()
1907 }
1908
1909
1910
1911
1912 for _, n := range []int{50, 500, 5000} {
1913 b.Run(fmt.Sprintf("Profile.WriteTo idle %d", n), withIdle(n, benchWriteTo))
1914 b.Run(fmt.Sprintf("Profile.WriteTo churn %d", n), withIdle(n, withChurn(benchWriteTo)))
1915 b.Run(fmt.Sprintf("runtime.GoroutineProfile churn %d", n), withIdle(n, withChurn(benchGoroutineProfile)))
1916 }
1917 }
1918
1919 var emptyCallStackTestRun int64
1920
1921
1922 func TestEmptyCallStack(t *testing.T) {
1923 name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
1924 emptyCallStackTestRun++
1925
1926 t.Parallel()
1927 var buf strings.Builder
1928 p := NewProfile(name)
1929
1930 p.Add("foo", 47674)
1931 p.WriteTo(&buf, 1)
1932 p.Remove("foo")
1933 got := buf.String()
1934 prefix := name + " profile: total 1\n"
1935 if !strings.HasPrefix(got, prefix) {
1936 t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
1937 }
1938 lostevent := "lostProfileEvent"
1939 if !strings.Contains(got, lostevent) {
1940 t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
1941 }
1942 }
1943
1944
1945
1946 func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
1947 base, kv, ok := strings.Cut(spec, ";")
1948 if !ok {
1949 panic("no semicolon in key/value spec")
1950 }
1951 k, v, ok := strings.Cut(kv, "=")
1952 if !ok {
1953 panic("missing = in key/value spec")
1954 }
1955 if !slices.Contains(labels[k], v) {
1956 return false
1957 }
1958 return stackContains(base, count, stk, labels)
1959 }
1960
1961 func TestCPUProfileLabel(t *testing.T) {
1962 matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions())
1963 testCPUProfile(t, matches, func(dur time.Duration) {
1964 Do(context.Background(), Labels("key", "value"), func(context.Context) {
1965 cpuHogger(cpuHog1, &salt1, dur)
1966 })
1967 })
1968 }
1969
1970 func TestLabelRace(t *testing.T) {
1971 testenv.MustHaveParallelism(t)
1972
1973
1974
1975 matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil)
1976 testCPUProfile(t, matches, func(dur time.Duration) {
1977 start := time.Now()
1978 var wg sync.WaitGroup
1979 for time.Since(start) < dur {
1980 var salts [10]int
1981 for i := 0; i < 10; i++ {
1982 wg.Add(1)
1983 go func(j int) {
1984 Do(context.Background(), Labels("key", "value"), func(context.Context) {
1985 cpuHogger(cpuHog1, &salts[j], time.Millisecond)
1986 })
1987 wg.Done()
1988 }(i)
1989 }
1990 wg.Wait()
1991 }
1992 })
1993 }
1994
1995 func TestGoroutineProfileLabelRace(t *testing.T) {
1996 testenv.MustHaveParallelism(t)
1997
1998
1999
2000
2001 t.Run("reset", func(t *testing.T) {
2002 ctx := context.Background()
2003 ctx, cancel := context.WithCancel(ctx)
2004 defer cancel()
2005
2006 go func() {
2007 goroutineProf := Lookup("goroutine")
2008 for ctx.Err() == nil {
2009 var w strings.Builder
2010 goroutineProf.WriteTo(&w, 1)
2011 prof := w.String()
2012 if strings.Contains(prof, "loop-i") {
2013 cancel()
2014 }
2015 }
2016 }()
2017
2018 for i := 0; ctx.Err() == nil; i++ {
2019 Do(ctx, Labels("loop-i", fmt.Sprint(i)), func(ctx context.Context) {
2020 })
2021 }
2022 })
2023
2024 t.Run("churn", func(t *testing.T) {
2025 ctx := context.Background()
2026 ctx, cancel := context.WithCancel(ctx)
2027 defer cancel()
2028
2029 var ready sync.WaitGroup
2030 ready.Add(1)
2031 var churn func(i int)
2032 churn = func(i int) {
2033 SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
2034 if i == 0 {
2035 ready.Done()
2036 }
2037 if ctx.Err() == nil {
2038 go churn(i + 1)
2039 }
2040 }
2041 go func() {
2042 churn(0)
2043 }()
2044 ready.Wait()
2045
2046 goroutineProf := Lookup("goroutine")
2047 for i := 0; i < 10; i++ {
2048 goroutineProf.WriteTo(io.Discard, 1)
2049 }
2050 })
2051 }
2052
2053
2054
2055 func TestLabelSystemstack(t *testing.T) {
2056
2057
2058 gogc := debug.SetGCPercent(100)
2059 debug.SetGCPercent(gogc)
2060
2061 matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
2062 p := testCPUProfile(t, matches, func(dur time.Duration) {
2063 Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
2064 parallelLabelHog(ctx, dur, gogc)
2065 })
2066 })
2067
2068
2069
2070
2071 for _, s := range p.Sample {
2072 isLabeled := s.Label != nil && slices.Contains(s.Label["key"], "value")
2073 var (
2074 mayBeLabeled bool
2075 mustBeLabeled string
2076 mustNotBeLabeled string
2077 )
2078 for _, loc := range s.Location {
2079 for _, l := range loc.Line {
2080 switch l.Function.Name {
2081 case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
2082 mustBeLabeled = l.Function.Name
2083 case "runtime/pprof.Do":
2084
2085
2086
2087
2088 mayBeLabeled = true
2089 case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runfinq", "runtime.sysmon":
2090
2091
2092
2093
2094 mustNotBeLabeled = l.Function.Name
2095 case "gogo", "gosave_systemstack_switch", "racecall":
2096
2097
2098
2099
2100
2101
2102 mayBeLabeled = true
2103 }
2104
2105 if strings.HasPrefix(l.Function.Name, "runtime.") {
2106
2107
2108
2109
2110
2111
2112 mayBeLabeled = true
2113 }
2114 }
2115 }
2116 errorStack := func(f string, args ...any) {
2117 var buf strings.Builder
2118 fprintStack(&buf, s.Location)
2119 t.Errorf("%s: %s", fmt.Sprintf(f, args...), buf.String())
2120 }
2121 if mustBeLabeled != "" && mustNotBeLabeled != "" {
2122 errorStack("sample contains both %s, which must be labeled, and %s, which must not be labeled", mustBeLabeled, mustNotBeLabeled)
2123 continue
2124 }
2125 if mustBeLabeled != "" || mustNotBeLabeled != "" {
2126
2127 mayBeLabeled = false
2128 }
2129 if mayBeLabeled {
2130
2131 continue
2132 }
2133 if mustBeLabeled != "" && !isLabeled {
2134 errorStack("sample must be labeled because of %s, but is not", mustBeLabeled)
2135 }
2136 if mustNotBeLabeled != "" && isLabeled {
2137 errorStack("sample must not be labeled because of %s, but is", mustNotBeLabeled)
2138 }
2139 }
2140 }
2141
2142
2143
2144 func labelHog(stop chan struct{}, gogc int) {
2145
2146
2147 runtime.GC()
2148
2149 for i := 0; ; i++ {
2150 select {
2151 case <-stop:
2152 return
2153 default:
2154 debug.SetGCPercent(gogc)
2155 }
2156 }
2157 }
2158
2159
2160 func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
2161 var wg sync.WaitGroup
2162 stop := make(chan struct{})
2163 for i := 0; i < runtime.GOMAXPROCS(0); i++ {
2164 wg.Add(1)
2165 go func() {
2166 defer wg.Done()
2167 labelHog(stop, gogc)
2168 }()
2169 }
2170
2171 time.Sleep(dur)
2172 close(stop)
2173 wg.Wait()
2174 }
2175
2176
2177
2178 func TestAtomicLoadStore64(t *testing.T) {
2179 f, err := os.CreateTemp("", "profatomic")
2180 if err != nil {
2181 t.Fatalf("TempFile: %v", err)
2182 }
2183 defer os.Remove(f.Name())
2184 defer f.Close()
2185
2186 if err := StartCPUProfile(f); err != nil {
2187 t.Fatal(err)
2188 }
2189 defer StopCPUProfile()
2190
2191 var flag uint64
2192 done := make(chan bool, 1)
2193
2194 go func() {
2195 for atomic.LoadUint64(&flag) == 0 {
2196 runtime.Gosched()
2197 }
2198 done <- true
2199 }()
2200 time.Sleep(50 * time.Millisecond)
2201 atomic.StoreUint64(&flag, 1)
2202 <-done
2203 }
2204
2205 func TestTracebackAll(t *testing.T) {
2206
2207
2208 f, err := os.CreateTemp("", "proftraceback")
2209 if err != nil {
2210 t.Fatalf("TempFile: %v", err)
2211 }
2212 defer os.Remove(f.Name())
2213 defer f.Close()
2214
2215 if err := StartCPUProfile(f); err != nil {
2216 t.Fatal(err)
2217 }
2218 defer StopCPUProfile()
2219
2220 ch := make(chan int)
2221 defer close(ch)
2222
2223 count := 10
2224 for i := 0; i < count; i++ {
2225 go func() {
2226 <-ch
2227 }()
2228 }
2229
2230 N := 10000
2231 if testing.Short() {
2232 N = 500
2233 }
2234 buf := make([]byte, 10*1024)
2235 for i := 0; i < N; i++ {
2236 runtime.Stack(buf, true)
2237 }
2238 }
2239
2240
2241
2242
2243
2244
2245
2246 func TestTryAdd(t *testing.T) {
2247 if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
2248 t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
2249 }
2250
2251
2252
2253 pcs := make([]uintptr, 2)
2254 inlinedCallerDump(pcs)
2255 inlinedCallerStack := make([]uint64, 2)
2256 for i := range pcs {
2257 inlinedCallerStack[i] = uint64(pcs[i])
2258 }
2259 wrapperPCs := make([]uintptr, 1)
2260 inlinedWrapperCallerDump(wrapperPCs)
2261
2262 if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
2263 t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
2264 }
2265
2266
2267
2268
2269
2270
2271
2272 pcs = make([]uintptr, 6)
2273 recursionChainTop(1, pcs)
2274 recursionStack := make([]uint64, len(pcs))
2275 for i := range pcs {
2276 recursionStack[i] = uint64(pcs[i])
2277 }
2278
2279 period := int64(2000 * 1000)
2280
2281 testCases := []struct {
2282 name string
2283 input []uint64
2284 count int
2285 wantLocs [][]string
2286 wantSamples []*profile.Sample
2287 }{{
2288
2289 name: "full_stack_trace",
2290 input: []uint64{
2291 3, 0, 500,
2292 5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
2293 },
2294 count: 2,
2295 wantLocs: [][]string{
2296 {"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
2297 },
2298 wantSamples: []*profile.Sample{
2299 {Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
2300 },
2301 }, {
2302 name: "bug35538",
2303 input: []uint64{
2304 3, 0, 500,
2305
2306
2307
2308 7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
2309 5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
2310 },
2311 count: 3,
2312 wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
2313 wantSamples: []*profile.Sample{
2314 {Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
2315 {Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
2316 },
2317 }, {
2318 name: "bug38096",
2319 input: []uint64{
2320 3, 0, 500,
2321
2322
2323 4, 0, 0, 4242,
2324 },
2325 count: 2,
2326 wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
2327 wantSamples: []*profile.Sample{
2328 {Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
2329 },
2330 }, {
2331
2332
2333
2334
2335
2336
2337
2338 name: "directly_recursive_func_is_not_inlined",
2339 input: []uint64{
2340 3, 0, 500,
2341 5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
2342 4, 0, 40, inlinedCallerStack[0],
2343 },
2344 count: 3,
2345
2346
2347 wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
2348 wantSamples: []*profile.Sample{
2349 {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
2350 {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
2351 },
2352 }, {
2353 name: "recursion_chain_inline",
2354 input: []uint64{
2355 3, 0, 500,
2356 9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
2357 },
2358 count: 2,
2359 wantLocs: [][]string{
2360 {"runtime/pprof.recursionChainBottom"},
2361 {
2362 "runtime/pprof.recursionChainMiddle",
2363 "runtime/pprof.recursionChainTop",
2364 "runtime/pprof.recursionChainBottom",
2365 },
2366 {
2367 "runtime/pprof.recursionChainMiddle",
2368 "runtime/pprof.recursionChainTop",
2369 "runtime/pprof.TestTryAdd",
2370 },
2371 },
2372 wantSamples: []*profile.Sample{
2373 {Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
2374 },
2375 }, {
2376 name: "truncated_stack_trace_later",
2377 input: []uint64{
2378 3, 0, 500,
2379 5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
2380 4, 0, 60, inlinedCallerStack[0],
2381 },
2382 count: 3,
2383 wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
2384 wantSamples: []*profile.Sample{
2385 {Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
2386 {Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
2387 },
2388 }, {
2389 name: "truncated_stack_trace_first",
2390 input: []uint64{
2391 3, 0, 500,
2392 4, 0, 70, inlinedCallerStack[0],
2393 5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
2394 },
2395 count: 3,
2396 wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
2397 wantSamples: []*profile.Sample{
2398 {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
2399 {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
2400 },
2401 }, {
2402
2403 name: "truncated_stack_trace_only",
2404 input: []uint64{
2405 3, 0, 500,
2406 4, 0, 70, inlinedCallerStack[0],
2407 },
2408 count: 2,
2409 wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
2410 wantSamples: []*profile.Sample{
2411 {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
2412 },
2413 }, {
2414
2415 name: "truncated_stack_trace_twice",
2416 input: []uint64{
2417 3, 0, 500,
2418 4, 0, 70, inlinedCallerStack[0],
2419
2420
2421
2422 5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
2423 },
2424 count: 3,
2425 wantLocs: [][]string{
2426 {"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
2427 {"runtime/pprof.inlinedCallerDump"},
2428 },
2429 wantSamples: []*profile.Sample{
2430 {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
2431 {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
2432 },
2433 }, {
2434 name: "expand_wrapper_function",
2435 input: []uint64{
2436 3, 0, 500,
2437 4, 0, 50, uint64(wrapperPCs[0]),
2438 },
2439 count: 2,
2440 wantLocs: [][]string{{"runtime/pprof.inlineWrapper.dump"}},
2441 wantSamples: []*profile.Sample{
2442 {Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
2443 },
2444 }}
2445
2446 for _, tc := range testCases {
2447 t.Run(tc.name, func(t *testing.T) {
2448 p, err := translateCPUProfile(tc.input, tc.count)
2449 if err != nil {
2450 t.Fatalf("translating profile: %v", err)
2451 }
2452 t.Logf("Profile: %v\n", p)
2453
2454
2455 var gotLoc [][]string
2456 for _, loc := range p.Location {
2457 var names []string
2458 for _, line := range loc.Line {
2459 names = append(names, line.Function.Name)
2460 }
2461 gotLoc = append(gotLoc, names)
2462 }
2463 if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
2464 t.Errorf("Got Location = %+v\n\twant %+v", got, want)
2465 }
2466
2467 var gotSamples []*profile.Sample
2468 for _, sample := range p.Sample {
2469 var locs []*profile.Location
2470 for _, loc := range sample.Location {
2471 locs = append(locs, &profile.Location{ID: loc.ID})
2472 }
2473 gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
2474 }
2475 if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
2476 t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
2477 }
2478 })
2479 }
2480 }
2481
2482 func TestTimeVDSO(t *testing.T) {
2483
2484
2485
2486 if runtime.GOOS == "android" {
2487
2488 testenv.SkipFlaky(t, 48655)
2489 }
2490
2491 matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
2492 p := testCPUProfile(t, matches, func(dur time.Duration) {
2493 t0 := time.Now()
2494 for {
2495 t := time.Now()
2496 if t.Sub(t0) >= dur {
2497 return
2498 }
2499 }
2500 })
2501
2502
2503 for _, sample := range p.Sample {
2504 var seenNow bool
2505 for _, loc := range sample.Location {
2506 for _, line := range loc.Line {
2507 if line.Function.Name == "time.now" {
2508 if seenNow {
2509 t.Fatalf("unexpected recursive time.now")
2510 }
2511 seenNow = true
2512 }
2513 }
2514 }
2515 }
2516 }
2517
2518 func TestProfilerStackDepth(t *testing.T) {
2519 t.Cleanup(disableSampling())
2520
2521 const depth = 128
2522 go produceProfileEvents(t, depth)
2523 awaitBlockedGoroutine(t, "chan receive", "goroutineDeep", 1)
2524
2525 tests := []struct {
2526 profiler string
2527 prefix []string
2528 }{
2529 {"heap", []string{"runtime/pprof.allocDeep"}},
2530 {"block", []string{"runtime.chanrecv1", "runtime/pprof.blockChanDeep"}},
2531 {"mutex", []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexDeep"}},
2532 {"goroutine", []string{"runtime.gopark", "runtime.chanrecv", "runtime.chanrecv1", "runtime/pprof.goroutineDeep"}},
2533 }
2534
2535 for _, test := range tests {
2536 t.Run(test.profiler, func(t *testing.T) {
2537 var buf bytes.Buffer
2538 if err := Lookup(test.profiler).WriteTo(&buf, 0); err != nil {
2539 t.Fatalf("failed to write heap profile: %v", err)
2540 }
2541 p, err := profile.Parse(&buf)
2542 if err != nil {
2543 t.Fatalf("failed to parse heap profile: %v", err)
2544 }
2545 t.Logf("Profile = %v", p)
2546
2547 stks := profileStacks(p)
2548 var matchedStacks [][]string
2549 for _, stk := range stks {
2550 if !hasPrefix(stk, test.prefix) {
2551 continue
2552 }
2553
2554
2555
2556
2557 matchedStacks = append(matchedStacks, stk)
2558 if len(stk) != depth {
2559 continue
2560 }
2561 if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn {
2562 continue
2563 }
2564
2565 return
2566 }
2567 for _, stk := range matchedStacks {
2568 t.Logf("matched stack=%s", stk)
2569 if len(stk) != depth {
2570 t.Errorf("want stack depth = %d, got %d", depth, len(stk))
2571 }
2572
2573 if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn {
2574 t.Errorf("want stack stack root %s, got %v", wantFn, rootFn)
2575 }
2576 }
2577 })
2578 }
2579 }
2580
2581 func hasPrefix(stk []string, prefix []string) bool {
2582 if len(prefix) > len(stk) {
2583 return false
2584 }
2585 for i := range prefix {
2586 if stk[i] != prefix[i] {
2587 return false
2588 }
2589 }
2590 return true
2591 }
2592
2593
2594 var _ = map[runtime.MemProfileRecord]struct{}{}
2595 var _ = map[runtime.StackRecord]struct{}{}
2596
2597
2598 func allocDeep(n int) {
2599 if n > 1 {
2600 allocDeep(n - 1)
2601 return
2602 }
2603 memSink = make([]byte, 1<<20)
2604 }
2605
2606
2607
2608 func blockChanDeep(t *testing.T, n int) {
2609 if n > 1 {
2610 blockChanDeep(t, n-1)
2611 return
2612 }
2613 ch := make(chan struct{})
2614 go func() {
2615 awaitBlockedGoroutine(t, "chan receive", "blockChanDeep", 1)
2616 ch <- struct{}{}
2617 }()
2618 <-ch
2619 }
2620
2621
2622
2623 func blockMutexDeep(t *testing.T, n int) {
2624 if n > 1 {
2625 blockMutexDeep(t, n-1)
2626 return
2627 }
2628 var mu sync.Mutex
2629 go func() {
2630 mu.Lock()
2631 mu.Lock()
2632 }()
2633 awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutexDeep", 1)
2634 mu.Unlock()
2635 }
2636
2637
2638
2639 func goroutineDeep(t *testing.T, n int) {
2640 if n > 1 {
2641 goroutineDeep(t, n-1)
2642 return
2643 }
2644 wait := make(chan struct{}, 1)
2645 t.Cleanup(func() {
2646 wait <- struct{}{}
2647 })
2648 <-wait
2649 }
2650
2651
2652
2653
2654
2655 func produceProfileEvents(t *testing.T, depth int) {
2656 allocDeep(depth - 1)
2657 blockChanDeep(t, depth-2)
2658 blockMutexDeep(t, depth-2)
2659 memSink = nil
2660 runtime.GC()
2661 goroutineDeep(t, depth-4)
2662 }
2663
2664 func getProfileStacks(collect func([]runtime.BlockProfileRecord) (int, bool), fileLine bool) []string {
2665 var n int
2666 var ok bool
2667 var p []runtime.BlockProfileRecord
2668 for {
2669 p = make([]runtime.BlockProfileRecord, n)
2670 n, ok = collect(p)
2671 if ok {
2672 p = p[:n]
2673 break
2674 }
2675 }
2676 var stacks []string
2677 for _, r := range p {
2678 var stack strings.Builder
2679 for i, pc := range r.Stack() {
2680 if i > 0 {
2681 stack.WriteByte('\n')
2682 }
2683
2684
2685
2686
2687
2688 f := runtime.FuncForPC(pc - 1)
2689 stack.WriteString(f.Name())
2690 if fileLine {
2691 stack.WriteByte(' ')
2692 file, line := f.FileLine(pc - 1)
2693 stack.WriteString(file)
2694 stack.WriteByte(':')
2695 stack.WriteString(strconv.Itoa(line))
2696 }
2697 }
2698 stacks = append(stacks, stack.String())
2699 }
2700 return stacks
2701 }
2702
2703 func TestMutexBlockFullAggregation(t *testing.T) {
2704
2705
2706
2707
2708 var m sync.Mutex
2709
2710 prev := runtime.SetMutexProfileFraction(-1)
2711 defer runtime.SetMutexProfileFraction(prev)
2712
2713 const fraction = 1
2714 const iters = 100
2715 const workers = 2
2716
2717 runtime.SetMutexProfileFraction(fraction)
2718 runtime.SetBlockProfileRate(1)
2719 defer runtime.SetBlockProfileRate(0)
2720
2721 wg := sync.WaitGroup{}
2722 wg.Add(workers)
2723 for j := 0; j < workers; j++ {
2724 go func() {
2725 for i := 0; i < iters; i++ {
2726 m.Lock()
2727
2728
2729 time.Sleep(time.Millisecond)
2730 m.Unlock()
2731 }
2732 wg.Done()
2733 }()
2734 }
2735 wg.Wait()
2736
2737 assertNoDuplicates := func(name string, collect func([]runtime.BlockProfileRecord) (int, bool)) {
2738 stacks := getProfileStacks(collect, true)
2739 seen := make(map[string]struct{})
2740 for _, s := range stacks {
2741 if _, ok := seen[s]; ok {
2742 t.Errorf("saw duplicate entry in %s profile with stack:\n%s", name, s)
2743 }
2744 seen[s] = struct{}{}
2745 }
2746 if len(seen) == 0 {
2747 t.Errorf("did not see any samples in %s profile for this test", name)
2748 }
2749 }
2750 t.Run("mutex", func(t *testing.T) {
2751 assertNoDuplicates("mutex", runtime.MutexProfile)
2752 })
2753 t.Run("block", func(t *testing.T) {
2754 assertNoDuplicates("block", runtime.BlockProfile)
2755 })
2756 }
2757
2758 func inlineA(mu *sync.Mutex, wg *sync.WaitGroup) { inlineB(mu, wg) }
2759 func inlineB(mu *sync.Mutex, wg *sync.WaitGroup) { inlineC(mu, wg) }
2760 func inlineC(mu *sync.Mutex, wg *sync.WaitGroup) {
2761 defer wg.Done()
2762 mu.Lock()
2763 mu.Unlock()
2764 }
2765
2766 func inlineD(mu *sync.Mutex, wg *sync.WaitGroup) { inlineE(mu, wg) }
2767 func inlineE(mu *sync.Mutex, wg *sync.WaitGroup) { inlineF(mu, wg) }
2768 func inlineF(mu *sync.Mutex, wg *sync.WaitGroup) {
2769 defer wg.Done()
2770 mu.Unlock()
2771 }
2772
2773 func TestBlockMutexProfileInlineExpansion(t *testing.T) {
2774 runtime.SetBlockProfileRate(1)
2775 defer runtime.SetBlockProfileRate(0)
2776 prev := runtime.SetMutexProfileFraction(1)
2777 defer runtime.SetMutexProfileFraction(prev)
2778
2779 var mu sync.Mutex
2780 var wg sync.WaitGroup
2781 wg.Add(2)
2782 mu.Lock()
2783 go inlineA(&mu, &wg)
2784 awaitBlockedGoroutine(t, "sync.Mutex.Lock", "inlineC", 1)
2785
2786 go inlineD(&mu, &wg)
2787 wg.Wait()
2788
2789 tcs := []struct {
2790 Name string
2791 Collect func([]runtime.BlockProfileRecord) (int, bool)
2792 SubStack string
2793 }{
2794 {
2795 Name: "mutex",
2796 Collect: runtime.MutexProfile,
2797 SubStack: `sync.(*Mutex).Unlock
2798 runtime/pprof.inlineF
2799 runtime/pprof.inlineE
2800 runtime/pprof.inlineD`,
2801 },
2802 {
2803 Name: "block",
2804 Collect: runtime.BlockProfile,
2805 SubStack: `sync.(*Mutex).Lock
2806 runtime/pprof.inlineC
2807 runtime/pprof.inlineB
2808 runtime/pprof.inlineA`,
2809 },
2810 }
2811
2812 for _, tc := range tcs {
2813 t.Run(tc.Name, func(t *testing.T) {
2814 stacks := getProfileStacks(tc.Collect, false)
2815 for _, s := range stacks {
2816 if strings.Contains(s, tc.SubStack) {
2817 return
2818 }
2819 }
2820 t.Error("did not see expected stack")
2821 t.Logf("wanted:\n%s", tc.SubStack)
2822 t.Logf("got: %s", stacks)
2823 })
2824 }
2825 }
2826
2827 func TestProfileRecordNullPadding(t *testing.T) {
2828
2829 t.Cleanup(disableSampling())
2830 memSink = make([]byte, 1)
2831 <-time.After(time.Millisecond)
2832 blockMutex(t)
2833 runtime.GC()
2834
2835
2836 testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile)
2837 testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile)
2838 testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile)
2839 testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) {
2840 return runtime.MemProfile(p, true)
2841 })
2842 testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) {
2843 return runtime.MemProfile(p, false)
2844 })
2845
2846 }
2847
2848 func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {
2849 stack0 := func(sr *T) *[32]uintptr {
2850 switch t := any(sr).(type) {
2851 case *runtime.StackRecord:
2852 return &t.Stack0
2853 case *runtime.MemProfileRecord:
2854 return &t.Stack0
2855 case *runtime.BlockProfileRecord:
2856 return &t.Stack0
2857 default:
2858 panic(fmt.Sprintf("unexpected type %T", sr))
2859 }
2860 }
2861
2862 t.Run(name, func(t *testing.T) {
2863 var p []T
2864 for {
2865 n, ok := fn(p)
2866 if ok {
2867 p = p[:n]
2868 break
2869 }
2870 p = make([]T, n*2)
2871 for i := range p {
2872 s0 := stack0(&p[i])
2873 for j := range s0 {
2874
2875 s0[j] = ^uintptr(0)
2876 }
2877 }
2878 }
2879
2880 if len(p) == 0 {
2881 t.Fatal("no records found")
2882 }
2883
2884 for _, sr := range p {
2885 for i, v := range stack0(&sr) {
2886 if v == ^uintptr(0) {
2887 t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr)
2888 }
2889 }
2890 }
2891 })
2892 }
2893
2894
2895
2896 func disableSampling() func() {
2897 oldMemRate := runtime.MemProfileRate
2898 runtime.MemProfileRate = 1
2899 runtime.SetBlockProfileRate(1)
2900 oldMutexRate := runtime.SetMutexProfileFraction(1)
2901 return func() {
2902 runtime.MemProfileRate = oldMemRate
2903 runtime.SetBlockProfileRate(0)
2904 runtime.SetMutexProfileFraction(oldMutexRate)
2905 }
2906 }
2907
View as plain text