Source file src/internal/trace/trace_test.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package trace_test

import (
	"bufio"
	"bytes"
	"fmt"
	"internal/race"
	"internal/testenv"
	"internal/trace"
	"internal/trace/testtrace"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"slices"
	"strings"
	"testing"
)

func TestTraceAnnotations(t *testing.T) {
	testTraceProg(t, "annotations.go", func(t *testing.T, tb, _ []byte, _ bool) {
		type evDesc struct {
			kind trace.EventKind
			task trace.TaskID
			args []string
		}
		want := []evDesc{
			{trace.EventTaskBegin, trace.TaskID(1), []string{"task0"}},
			{trace.EventRegionBegin, trace.TaskID(1), []string{"region0"}},
			{trace.EventRegionBegin, trace.TaskID(1), []string{"region1"}},
			{trace.EventLog, trace.TaskID(1), []string{"key0", "0123456789abcdef"}},
			{trace.EventRegionEnd, trace.TaskID(1), []string{"region1"}},
			{trace.EventRegionEnd, trace.TaskID(1), []string{"region0"}},
			{trace.EventTaskEnd, trace.TaskID(1), []string{"task0"}},
			// Currently, the pre-existing region is not recorded, to avoid allocations.
			{trace.EventRegionBegin, trace.BackgroundTask, []string{"post-existing region"}},
		}
		r, err := trace.NewReader(bytes.NewReader(tb))
		if err != nil {
			t.Fatal(err)
		}
		for {
			ev, err := r.ReadEvent()
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatal(err)
			}
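			// The trace interleaves these events with others, so check each
			// event against all remaining expectations; a matched expectation
			// is removed by swapping in the last element of want.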
			for i, wantEv := range want {
				if wantEv.kind != ev.Kind() {
					continue
				}
				match := false
				switch ev.Kind() {
				case trace.EventTaskBegin, trace.EventTaskEnd:
					task := ev.Task()
					match = task.ID == wantEv.task && task.Type == wantEv.args[0]
				case trace.EventRegionBegin, trace.EventRegionEnd:
					reg := ev.Region()
					match = reg.Task == wantEv.task && reg.Type == wantEv.args[0]
				case trace.EventLog:
					log := ev.Log()
					match = log.Task == wantEv.task && log.Category == wantEv.args[0] && log.Message == wantEv.args[1]
				}
				if match {
					want[i] = want[len(want)-1]
					want = want[:len(want)-1]
					break
				}
			}
		}
		if len(want) != 0 {
			for _, ev := range want {
				t.Errorf("no match for %s TaskID=%d Args=%#v", ev.kind, ev.task, ev.args)
			}
		}
	})
}

func TestTraceAnnotationsStress(t *testing.T) {
	testTraceProg(t, "annotations-stress.go", nil)
}

func TestTraceCgoCallback(t *testing.T) {
	testenv.MustHaveCGO(t)

	switch runtime.GOOS {
	case "plan9", "windows":
		t.Skipf("cgo callback test requires pthreads and is not supported on %s", runtime.GOOS)
	}
	testTraceProg(t, "cgo-callback.go", nil)
}

func TestTraceCPUProfile(t *testing.T) {
	testTraceProg(t, "cpu-profile.go", func(t *testing.T, tb, stderr []byte, _ bool) {
		// Parse stderr, which contains a CPU profile summary if everything went well.
		// (If it didn't, we shouldn't even make it here.)
		scanner := bufio.NewScanner(bytes.NewReader(stderr))
		pprofSamples := 0
		pprofStacks := make(map[string]int)
		for scanner.Scan() {
			var stack string
			var samples int
			_, err := fmt.Sscanf(scanner.Text(), "%s\t%d", &stack, &samples)
			if err != nil {
				t.Fatalf("failed to parse CPU profile summary in stderr: %s\n\tfull:\n%s", scanner.Text(), stderr)
			}
			pprofStacks[stack] = samples
			pprofSamples += samples
		}
		if err := scanner.Err(); err != nil {
			t.Fatalf("failed to parse CPU profile summary in stderr: %v", err)
		}
		if pprofSamples == 0 {
			t.Skip("CPU profile did not include any samples while tracing was active")
		}

		// Examine the execution tracer's view of the CPU profile samples. Filter it
		// to only include samples from the single test goroutine. Use the goroutine
		// ID that was recorded in the events: that should reflect getg().m.curg,
		// same as the profiler's labels (even when the M is using its g0 stack).
		totalTraceSamples := 0
		traceSamples := 0
		traceStacks := make(map[string]int)
		r, err := trace.NewReader(bytes.NewReader(tb))
		if err != nil {
			t.Fatal(err)
		}
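		// hogRegion records the begin event of the test program's "cpuHogger"
		// region; only samples attributed to that region's goroutine are
		// compared against the pprof summary.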
		var hogRegion *trace.Event
		var hogRegionClosed bool
		for {
			ev, err := r.ReadEvent()
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatal(err)
			}
			if ev.Kind() == trace.EventRegionBegin && ev.Region().Type == "cpuHogger" {
				hogRegion = &ev
			}
			if ev.Kind() == trace.EventStackSample {
				totalTraceSamples++
				if hogRegion != nil && ev.Goroutine() == hogRegion.Goroutine() {
					traceSamples++
					var fns []string
					for frame := range ev.Stack().Frames() {
						if frame.Func != "runtime.goexit" {
							fns = append(fns, fmt.Sprintf("%s:%d", frame.Func, frame.Line))
						}
					}
					stack := strings.Join(fns, "|")
					traceStacks[stack]++
				}
			}
			if ev.Kind() == trace.EventRegionEnd && ev.Region().Type == "cpuHogger" {
				hogRegionClosed = true
			}
		}
		if hogRegion == nil {
			t.Fatalf("execution trace did not identify cpuHogger goroutine")
		} else if !hogRegionClosed {
			t.Fatalf("execution trace did not close cpuHogger region")
		}

		// The execution trace may drop CPU profile samples if the profiling buffer
		// overflows. Based on the size of profBufWordCount, that takes a bit over
		// 1900 CPU samples or 19 thread-seconds at a 100 Hz sample rate. If we've
		// hit that case, then we definitely have at least one full buffer's worth
		// of CPU samples, so we'll call that success.
		overflowed := totalTraceSamples >= 1900
		if traceSamples < pprofSamples {
			t.Logf("execution trace did not include all CPU profile samples; %d in profile, %d in trace", pprofSamples, traceSamples)
			if !overflowed {
				t.Fail()
			}
		}

		for stack, traceSamples := range traceStacks {
			pprofSamples := pprofStacks[stack]
			delete(pprofStacks, stack)
			if traceSamples < pprofSamples {
				t.Logf("execution trace did not include all CPU profile samples for stack %q; %d in profile, %d in trace",
					stack, pprofSamples, traceSamples)
				if !overflowed {
					t.Fail()
				}
			}
		}
		for stack, pprofSamples := range pprofStacks {
			t.Logf("CPU profile included %d samples at stack %q not present in execution trace", pprofSamples, stack)
			if !overflowed {
				t.Fail()
			}
		}

		if t.Failed() {
			t.Logf("execution trace CPU samples:")
			for stack, samples := range traceStacks {
				t.Logf("%d: %q", samples, stack)
			}
			t.Logf("CPU profile:\n%s", stderr)
		}
	})
}

func TestTraceFutileWakeup(t *testing.T) {
	testTraceProg(t, "futile-wakeup.go", func(t *testing.T, tb, _ []byte, _ bool) {
		// Check to make sure that no goroutine in the "special" trace region
		// ends up blocking, unblocking, then immediately blocking again.
		//
		// The goroutines are careful to call runtime.Gosched in between blocking
		// operations, so there should never be a clean block/unblock on the
		// goroutine unless the runtime was generating extraneous events.
		const (
			entered = iota
			blocked
			runnable
			running
		)
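		// gs tracks where each goroutine inside a "special" region currently
		// sits in the entered -> blocked -> runnable -> running state machine.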
		gs := make(map[trace.GoID]int)
		seenSpecialGoroutines := false
		r, err := trace.NewReader(bytes.NewReader(tb))
		if err != nil {
			t.Fatal(err)
		}
		for {
			ev, err := r.ReadEvent()
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatal(err)
			}
			// Only track goroutines in the special region we control, so runtime
			// goroutines don't interfere (it's totally valid in traces for a
			// goroutine to block, run, and block again; that's not what we care about).
			if ev.Kind() == trace.EventRegionBegin && ev.Region().Type == "special" {
				seenSpecialGoroutines = true
				gs[ev.Goroutine()] = entered
			}
			if ev.Kind() == trace.EventRegionEnd && ev.Region().Type == "special" {
				delete(gs, ev.Goroutine())
			}
			// Track state transitions for goroutines we care about.
			//
			// The goroutines we care about will advance through the state machine
			// of entered -> blocked -> runnable -> running. If in the running state
			// we block, then we have a futile wakeup. Because of the runtime.Gosched
			// on these specially marked goroutines, we should end up back in runnable
			// first. If at any point we go to a different state, switch back to entered
			// and wait for the next time the goroutine blocks.
			if ev.Kind() != trace.EventStateTransition {
				continue
			}
			st := ev.StateTransition()
			if st.Resource.Kind != trace.ResourceGoroutine {
				continue
			}
			id := st.Resource.Goroutine()
			state, ok := gs[id]
			if !ok {
				continue
			}
			_, new := st.Goroutine()
			switch state {
			case entered:
				if new == trace.GoWaiting {
					state = blocked
				} else {
					state = entered
				}
			case blocked:
				if new == trace.GoRunnable {
					state = runnable
				} else {
					state = entered
				}
			case runnable:
				if new == trace.GoRunning {
					state = running
				} else {
					state = entered
				}
			case running:
				if new == trace.GoWaiting {
					t.Fatalf("found futile wakeup on goroutine %d", id)
				} else {
					state = entered
				}
			}
			gs[id] = state
		}
		if !seenSpecialGoroutines {
			t.Fatal("did not see a goroutine in the 'special' region")
		}
	})
}

func TestTraceGCStress(t *testing.T) {
	testTraceProg(t, "gc-stress.go", nil)
}

func TestTraceGOMAXPROCS(t *testing.T) {
	testTraceProg(t, "gomaxprocs.go", nil)
}

func TestTraceStacks(t *testing.T) {
	testTraceProg(t, "stacks.go", func(t *testing.T, tb, _ []byte, stress bool) {
		type frame struct {
			fn   string
			line int
		}
		type evDesc struct {
			kind   trace.EventKind
			match  string
			frames []frame
		}
		// mainLine is the line number of `func main()` in testprog/stacks.go.
		const mainLine = 21
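		// Expected frame lines are offsets from mainLine where they matter;
		// a line of 0 means the line number is not checked (see stackMatches).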
		want := []evDesc{
			{trace.EventStateTransition, "Goroutine Running->Runnable", []frame{
				{"main.main", mainLine + 82},
			}},
			{trace.EventStateTransition, "Goroutine NotExist->Runnable", []frame{
				{"main.main", mainLine + 11},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"runtime.block", 0},
				{"main.main.func1", 0},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"runtime.chansend1", 0},
				{"main.main.func2", 0},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"runtime.chanrecv1", 0},
				{"main.main.func3", 0},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"runtime.chanrecv1", 0},
				{"main.main.func4", 0},
			}},
			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
				{"runtime.chansend1", 0},
				{"main.main", mainLine + 84},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"runtime.chansend1", 0},
				{"main.main.func5", 0},
			}},
			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
				{"runtime.chanrecv1", 0},
				{"main.main", mainLine + 85},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"runtime.selectgo", 0},
				{"main.main.func6", 0},
			}},
			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
				{"runtime.selectgo", 0},
				{"main.main", mainLine + 86},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"sync.(*Mutex).Lock", 0},
				{"main.main.func7", 0},
			}},
			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
				{"sync.(*Mutex).Unlock", 0},
				{"main.main", 0},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"sync.(*WaitGroup).Wait", 0},
				{"main.main.func8", 0},
			}},
			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
				{"sync.(*WaitGroup).Add", 0},
				{"sync.(*WaitGroup).Done", 0},
				{"main.main", mainLine + 91},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"sync.(*Cond).Wait", 0},
				{"main.main.func9", 0},
			}},
			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
				{"sync.(*Cond).Signal", 0},
				{"main.main", 0},
			}},
			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
				{"time.Sleep", 0},
				{"main.main", 0},
			}},
			{trace.EventMetric, "/sched/gomaxprocs:threads", []frame{
				{"runtime.startTheWorld", 0}, // this is when the current gomaxprocs is logged.
				{"runtime.startTheWorldGC", 0},
				{"runtime.GOMAXPROCS", 0},
				{"main.main", 0},
			}},
		}
		if !stress {
			// Only check for this stack if !stress, because traceAdvance alone could
			// allocate enough memory to trigger a GC if called frequently enough.
			// This might cause the runtime.GC call we're trying to match against to
			// coalesce with an active GC triggered by traceAdvance. In that case
			// we won't have an EventRangeBegin event that matches the stack trace we're
			// looking for, since runtime.GC will not have triggered the GC.
			gcEv := evDesc{trace.EventRangeBegin, "GC concurrent mark phase", []frame{
				{"runtime.GC", 0},
				{"main.main", 0},
			}}
			want = append(want, gcEv)
		}
		if runtime.GOOS != "windows" && runtime.GOOS != "plan9" {
			want = append(want, []evDesc{
				{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
					{"internal/poll.(*FD).Accept", 0},
					{"net.(*netFD).accept", 0},
					{"net.(*TCPListener).accept", 0},
					{"net.(*TCPListener).Accept", 0},
					{"main.main.func10", 0},
				}},
				{trace.EventStateTransition, "Goroutine Running->Syscall", []frame{
					{"syscall.read", 0},
					{"syscall.Read", 0},
					{"internal/poll.ignoringEINTRIO", 0},
					{"internal/poll.(*FD).Read", 0},
					{"os.(*File).read", 0},
					{"os.(*File).Read", 0},
					{"main.main.func11", 0},
				}},
			}...)
		}
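		// stackMatches reports whether the innermost frames of stk match the
		// expected frames in order. A line of 0 matches any line number.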
		stackMatches := func(stk trace.Stack, frames []frame) bool {
			// Compare only the innermost len(frames) frames. Without the length
			// check, a stack deeper than the expectation would index past the
			// end of frames, and a shallower one would match incorrectly.
			stkFrames := slices.Collect(stk.Frames())
			if len(stkFrames) < len(frames) {
				return false
			}
			for i, f := range stkFrames[:len(frames)] {
				if f.Func != frames[i].fn {
					return false
				}
				if line := uint64(frames[i].line); line != 0 && line != f.Line {
					return false
				}
			}
			return true
		}
		r, err := trace.NewReader(bytes.NewReader(tb))
		if err != nil {
			t.Fatal(err)
		}
		for {
			ev, err := r.ReadEvent()
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatal(err)
			}
			for i, wantEv := range want {
				if wantEv.kind != ev.Kind() {
					continue
				}
				match := false
				switch ev.Kind() {
				case trace.EventStateTransition:
					st := ev.StateTransition()
					str := ""
					switch st.Resource.Kind {
					case trace.ResourceGoroutine:
						old, new := st.Goroutine()
						str = fmt.Sprintf("%s %s->%s", st.Resource.Kind, old, new)
					}
					match = str == wantEv.match
				case trace.EventRangeBegin:
					rng := ev.Range()
					match = rng.Name == wantEv.match
				case trace.EventMetric:
					metric := ev.Metric()
					match = metric.Name == wantEv.match
				}
				match = match && stackMatches(ev.Stack(), wantEv.frames)
				if match {
					want[i] = want[len(want)-1]
					want = want[:len(want)-1]
					break
				}
			}
		}
		if len(want) != 0 {
			for _, ev := range want {
				t.Errorf("no match for %s Match=%s Stack=%#v", ev.kind, ev.match, ev.frames)
			}
		}
	})
}

func TestTraceStress(t *testing.T) {
	switch runtime.GOOS {
	case "js", "wasip1":
		t.Skip("no os.Pipe on " + runtime.GOOS)
	}
	testTraceProg(t, "stress.go", checkReaderDeterminism)
}

func TestTraceStressStartStop(t *testing.T) {
	switch runtime.GOOS {
	case "js", "wasip1":
		t.Skip("no os.Pipe on " + runtime.GOOS)
	}
	testTraceProg(t, "stress-start-stop.go", nil)
}

func TestTraceManyStartStop(t *testing.T) {
	testTraceProg(t, "many-start-stop.go", nil)
}

func TestTraceWaitOnPipe(t *testing.T) {
	switch runtime.GOOS {
	case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
		testTraceProg(t, "wait-on-pipe.go", nil)
		return
	}
	t.Skip("no applicable syscall.Pipe on " + runtime.GOOS)
}

func TestTraceIterPull(t *testing.T) {
	testTraceProg(t, "iter-pull.go", nil)
}

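// checkReaderDeterminism parses the same trace twice and verifies that both
// passes produce identical event streams.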
func checkReaderDeterminism(t *testing.T, tb, _ []byte, _ bool) {
	events := func() []trace.Event {
		var evs []trace.Event

		r, err := trace.NewReader(bytes.NewReader(tb))
		if err != nil {
			t.Fatal(err)
		}
		for {
			ev, err := r.ReadEvent()
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatal(err)
			}
			evs = append(evs, ev)
		}

		return evs
	}

	evs1 := events()
	evs2 := events()

	if l1, l2 := len(evs1), len(evs2); l1 != l2 {
		t.Fatalf("re-reading trace gives different event count (%d != %d)", l1, l2)
	}
	for i, ev1 := range evs1 {
		ev2 := evs2[i]
		if s1, s2 := ev1.String(), ev2.String(); s1 != s2 {
			t.Errorf("re-reading trace gives different event %d:\n%s\n%s\n", i, s1, s2)
			break
		}
	}
}

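// testTraceProg runs the named program from testdata/testprog with tracing
// enabled, validates the trace it writes to stdout, and then, if extra is
// non-nil, calls it with the trace bytes, the program's stderr, and whether
// the run used stress mode. Each program is run in three configurations:
// default, stress (constantly advancing generations), and with alloc/free
// events enabled.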
func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace, stderr []byte, stress bool)) {
	testenv.MustHaveGoRun(t)

	// Check if we're on a builder.
	onBuilder := testenv.Builder() != ""
	onOldBuilder := !strings.Contains(testenv.Builder(), "gotip") && !strings.Contains(testenv.Builder(), "go1")

	testPath := filepath.Join("./testdata/testprog", progName)
	testName := progName
	runTest := func(t *testing.T, stress bool, extraGODEBUG string) {
		// Run the program and capture the trace, which is always written to stdout.
		cmd := testenv.Command(t, testenv.GoToolPath(t), "run")
		if race.Enabled {
			cmd.Args = append(cmd.Args, "-race")
		}
		cmd.Args = append(cmd.Args, testPath)
		cmd.Env = append(os.Environ(), "GOEXPERIMENT=rangefunc")
		// Add a stack ownership check. This is cheap enough for testing.
		godebug := "tracecheckstackownership=1"
		if stress {
			// Advance a generation constantly to stress the tracer.
			godebug += ",traceadvanceperiod=0"
		}
		if extraGODEBUG != "" {
			// Add extra GODEBUG flags.
			godebug += "," + extraGODEBUG
		}
		cmd.Env = append(cmd.Env, "GODEBUG="+godebug)

		// Capture stdout and stderr.
		//
		// The protocol for these programs is that stdout contains the trace data
		// and stderr is an expectation in string format.
		var traceBuf, errBuf bytes.Buffer
		cmd.Stdout = &traceBuf
		cmd.Stderr = &errBuf
		// Run the program.
		if err := cmd.Run(); err != nil {
			if errBuf.Len() != 0 {
				t.Logf("stderr: %s", errBuf.String())
			}
			t.Fatal(err)
		}
		tb := traceBuf.Bytes()

		// Test the trace and the parser.
		testReader(t, bytes.NewReader(tb), testtrace.ExpectSuccess())

		// Run some extra validation.
		if !t.Failed() && extra != nil {
			extra(t, tb, errBuf.Bytes(), stress)
		}

		// Dump some more information on failure.
		if t.Failed() && onBuilder {
			// Dump directly to the test log on the builder, since this
			// data is critical for debugging and this is the only way
			// we can currently make sure it's retained.
			t.Log("found bad trace; dumping to test log...")
			s := dumpTraceToText(t, tb)
			if onOldBuilder && len(s) > 1<<20+512<<10 {
				// The old build infrastructure truncates logs at ~2 MiB.
				// Let's assume we're the only failure and give ourselves
				// up to 1.5 MiB to dump the trace.
				//
				// TODO(mknyszek): Remove this when we've migrated off of
				// the old infrastructure.
				t.Logf("text trace too large to dump (%d bytes)", len(s))
			} else {
				t.Log(s)
			}
		} else if t.Failed() || *dumpTraces {
			// We asked to dump the trace or failed. Write the trace to a file.
			t.Logf("wrote trace to file: %s", dumpTraceToFile(t, testName, stress, tb))
		}
	}
	t.Run("Default", func(t *testing.T) {
		runTest(t, false, "")
	})
	t.Run("Stress", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping trace stress tests in short mode")
		}
		runTest(t, true, "")
	})
	t.Run("AllocFree", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping trace alloc/free tests in short mode")
		}
		runTest(t, false, "traceallocfree=1")
	})
}