Source file src/runtime/mprof.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Malloc profiling.
     6  // Patterned after tcmalloc's algorithms; shorter code.
     7  
     8  package runtime
     9  
    10  import (
    11  	"internal/abi"
    12  	"internal/goarch"
    13  	"internal/profilerecord"
    14  	"internal/runtime/atomic"
    15  	"internal/runtime/sys"
    16  	"unsafe"
    17  )
    18  
    19  // NOTE(rsc): Everything here could use cas if contention became an issue.
    20  var (
    21  	// profInsertLock protects changes to the start of all *bucket linked lists
    22  	profInsertLock mutex
    23  	// profBlockLock protects the contents of every blockRecord struct
    24  	profBlockLock mutex
    25  	// profMemActiveLock protects the active field of every memRecord struct
    26  	profMemActiveLock mutex
    27  	// profMemFutureLock is a set of locks that protect the respective elements
    28  	// of the future array of every memRecord struct
    29  	profMemFutureLock [len(memRecord{}.future)]mutex
    30  )
    31  
    32  // All memory allocations are local and do not escape outside of the profiler.
    33  // The profiler is forbidden from referring to garbage-collected memory.
    34  
    35  const (
    36  	// profile types
    37  	memProfile bucketType = 1 + iota
    38  	blockProfile
    39  	mutexProfile
    40  
    41  	// size of bucket hash table
    42  	buckHashSize = 179999
    43  
    44  	// maxSkip is to account for deferred inline expansion
    45  	// when using frame pointer unwinding. We record the stack
    46  	// with "physical" frame pointers but handle skipping "logical"
    47  	// frames at some point after collecting the stack. So
    48  	// we need extra space in order to avoid getting fewer than the
    49  	// desired maximum number of frames after expansion.
    50  	// This should be at least as large as the largest skip value
    51  	// used for profiling; otherwise stacks may be truncated inconsistently.
    52  	maxSkip = 5
    53  
    54  	// maxProfStackDepth is the highest valid value for debug.profstackdepth.
    55  	// It's used for the bucket.stk func.
    56  	// TODO(fg): can we get rid of this?
    57  	maxProfStackDepth = 1024
    58  )
    59  
    60  type bucketType int
    61  
    62  // A bucket holds per-call-stack profiling information.
    63  // The representation is a bit sleazy, inherited from C.
    64  // This struct defines the bucket header. It is followed in
    65  // memory by the stack words and then the actual record
    66  // data, either a memRecord or a blockRecord.
    67  //
    68  // Buckets are looked up by hashing the call stack (and size)
    69  // into a linked-list hash table.
    70  //
    71  // None of the fields in this bucket header are modified after
    72  // creation, including its next and allnext links.
    73  //
    74  // No heap pointers.
    75  type bucket struct {
    76  	_       sys.NotInHeap
    77  	next    *bucket
    78  	allnext *bucket
    79  	typ     bucketType // memProfile or blockProfile (includes mutexProfile)
    80  	hash    uintptr
    81  	size    uintptr
    82  	nstk    uintptr
    83  }
    84  
    85  // A memRecord is the bucket data for a bucket of type memProfile,
    86  // part of the memory profile.
    87  type memRecord struct {
    88  	// The following complex 3-stage scheme of stats accumulation
    89  	// is required to obtain a consistent picture of mallocs and frees
    90  	// for some point in time.
    91  	// The problem is that mallocs come in real time, while frees
    92  	// come only after a GC during concurrent sweeping. So if we would
    93  	// naively count them, we would get a skew toward mallocs.
    94  	//
    95  	// Hence, we delay information to get consistent snapshots as
    96  	// of mark termination. Allocations count toward the next mark
    97  	// termination's snapshot, while sweep frees count toward the
    98  	// previous mark termination's snapshot:
    99  	//
   100  	//              MT          MT          MT          MT
   101  	//             .·|         .·|         .·|         .·|
   102  	//          .·˙  |      .·˙  |      .·˙  |      .·˙  |
   103  	//       .·˙     |   .·˙     |   .·˙     |   .·˙     |
   104  	//    .·˙        |.·˙        |.·˙        |.·˙        |
   105  	//
   106  	//       alloc → ▲ ← free
   107  	//               ┠┅┅┅┅┅┅┅┅┅┅┅P
   108  	//       C+2     →    C+1    →  C
   109  	//
   110  	//                   alloc → ▲ ← free
   111  	//                           ┠┅┅┅┅┅┅┅┅┅┅┅P
   112  	//                   C+2     →    C+1    →  C
   113  	//
   114  	// Since we can't publish a consistent snapshot until all of
   115  	// the sweep frees are accounted for, we wait until the next
   116  	// mark termination ("MT" above) to publish the previous mark
   117  	// termination's snapshot ("P" above). To do this, allocation
   118  	// and free events are accounted to *future* heap profile
   119  	// cycles ("C+n" above), and we only publish a cycle once all
   120  	// of the events from that cycle are guaranteed to be done. Specifically:
   121  	//
   122  	// Mallocs are accounted to cycle C+2.
   123  	// Explicit frees are accounted to cycle C+2.
   124  	// GC frees (done during sweeping) are accounted to cycle C+1.
   125  	//
   126  	// After mark termination, we increment the global heap
   127  	// profile cycle counter and accumulate the stats from cycle C
   128  	// into the active profile.
   129  
   130  	// active is the currently published profile. A profiling
   131  	// cycle can be accumulated into active once it is complete.
   132  	active memRecordCycle
   133  
   134  	// future records the profile events we're counting for cycles
   135  	// that have not yet been published. This is a ring buffer
   136  	// indexed by the global heap profile cycle C and stores
   137  	// cycles C, C+1, and C+2. Unlike active, these counts are
   138  	// only for a single cycle; they are not cumulative across
   139  	// cycles.
   140  	//
   141  	// We store cycle C here because there's a window between when
   142  	// C becomes the active cycle and when we've flushed it to
   143  	// active.
   144  	future [3]memRecordCycle
   145  }
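
        // The accounting described above charges mallocs to cycle C+2, sweep frees
        // to cycle C+1, and flushes cycle C into active. A small self-contained
        // sketch of the ring indexing used by mProf_Malloc, mProf_Free, and
        // mProf_FlushLocked below (illustrative only, not runtime code):
        //
        //	package main
        //
        //	import "fmt"
        //
        //	func main() {
        //		const futureLen = 3 // mirrors len(memRecord{}.future)
        //		for c := uint32(0); c < 5; c++ {
        //			flush := c % futureLen         // slot flushed into active at cycle C
        //			frees := (c + 1) % futureLen   // slot charged by sweep frees (C+1)
        //			mallocs := (c + 2) % futureLen // slot charged by new mallocs (C+2)
        //			fmt.Printf("C=%d flush=%d frees=%d mallocs=%d\n", c, flush, frees, mallocs)
        //		}
        //	}
        //
        // A slot is only reused after its cycle has been flushed into active, which
        // is why three slots suffice to keep cycles C, C+1, and C+2 separate.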
   146  
   147  // A memRecordCycle counts the allocs and frees for one heap profile cycle.
   148  type memRecordCycle struct {
   149  	allocs, frees           uintptr
   150  	alloc_bytes, free_bytes uintptr
   151  }
   152  
   153  // add accumulates b into a. It does not zero b.
   154  func (a *memRecordCycle) add(b *memRecordCycle) {
   155  	a.allocs += b.allocs
   156  	a.frees += b.frees
   157  	a.alloc_bytes += b.alloc_bytes
   158  	a.free_bytes += b.free_bytes
   159  }
   160  
   161  // A blockRecord is the bucket data for a bucket of type blockProfile,
   162  // which is used in blocking and mutex profiles.
   163  type blockRecord struct {
   164  	count  float64
   165  	cycles int64
   166  }
   167  
   168  var (
   169  	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
   170  	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
   171  	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
   172  	buckhash atomic.UnsafePointer // *buckhashArray
   173  
   174  	mProfCycle mProfCycleHolder
   175  )
   176  
   177  type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket
   178  
   179  const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
   180  
   181  // mProfCycleHolder holds the global heap profile cycle number (wrapped at
   182  // mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
   183  // indicate whether future[cycle] in all buckets has been queued to flush into
   184  // the active profile.
   185  type mProfCycleHolder struct {
   186  	value atomic.Uint32
   187  }
   188  
   189  // read returns the current cycle count.
   190  func (c *mProfCycleHolder) read() (cycle uint32) {
   191  	v := c.value.Load()
   192  	cycle = v >> 1
   193  	return cycle
   194  }
   195  
   196  // setFlushed sets the flushed flag. It returns the current cycle count and the
   197  // previous value of the flushed flag.
   198  func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
   199  	for {
   200  		prev := c.value.Load()
   201  		cycle = prev >> 1
   202  		alreadyFlushed = (prev & 0x1) != 0
   203  		next := prev | 0x1
   204  		if c.value.CompareAndSwap(prev, next) {
   205  			return cycle, alreadyFlushed
   206  		}
   207  	}
   208  }
   209  
   210  // increment increases the cycle count by one, wrapping the value at
   211  // mProfCycleWrap. It clears the flushed flag.
   212  func (c *mProfCycleHolder) increment() {
   213  	// We explicitly wrap mProfCycle rather than depending on
   214  	// uint wraparound because the memRecord.future ring does not
   215  	// itself wrap at a power of two.
   216  	for {
   217  		prev := c.value.Load()
   218  		cycle := prev >> 1
   219  		cycle = (cycle + 1) % mProfCycleWrap
   220  		next := cycle << 1
   221  		if c.value.CompareAndSwap(prev, next) {
   222  			break
   223  		}
   224  	}
   225  }
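
        // To make the packing concrete: bit 0 of mProfCycleHolder.value holds the
        // "flushed" flag and the remaining bits hold the cycle count. A minimal
        // sketch of the encode/decode (illustrative only; pack and unpack are not
        // runtime functions):
        //
        //	func pack(cycle uint32, flushed bool) uint32 {
        //		v := cycle << 1
        //		if flushed {
        //			v |= 1
        //		}
        //		return v
        //	}
        //
        //	func unpack(v uint32) (cycle uint32, flushed bool) {
        //		return v >> 1, v&1 != 0
        //	}
        //
        // setFlushed corresponds to v |= 1, while increment rebuilds the whole word
        // from the wrapped cycle count, which also clears the flag.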
   226  
   227  // newBucket allocates a bucket with the given type and number of stack entries.
   228  func newBucket(typ bucketType, nstk int) *bucket {
   229  	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
   230  	switch typ {
   231  	default:
   232  		throw("invalid profile bucket type")
   233  	case memProfile:
   234  		size += unsafe.Sizeof(memRecord{})
   235  	case blockProfile, mutexProfile:
   236  		size += unsafe.Sizeof(blockRecord{})
   237  	}
   238  
   239  	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
   240  	b.typ = typ
   241  	b.nstk = uintptr(nstk)
   242  	return b
   243  }
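
        // For example, a memProfile bucket with nstk stack slots is laid out as the
        // bucket header, then nstk uintptr stack entries, then a memRecord. The
        // helper methods below recover those regions purely by offset. A sketch of
        // the arithmetic, assuming a 64-bit platform where unsafe.Sizeof(uintptr(0))
        // is 8 (variable names here are illustrative only):
        //
        //	stackOff := unsafe.Sizeof(bucket{})                // stack starts after the header
        //	recordOff := stackOff + uintptr(nstk)*8            // record starts after the stack
        //	total := recordOff + unsafe.Sizeof(memRecord{})    // == size passed to persistentalloc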
   244  
   245  // stk returns the slice in b holding the stack. The caller can assume that the
   246  // backing array is immutable.
   247  func (b *bucket) stk() []uintptr {
   248  	stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
   249  	if b.nstk > maxProfStackDepth {
   250  		// prove that slicing works; otherwise a failure requires a P
   251  		throw("bad profile stack count")
   252  	}
   253  	return stk[:b.nstk:b.nstk]
   254  }
   255  
   256  // mp returns the memRecord associated with the memProfile bucket b.
   257  func (b *bucket) mp() *memRecord {
   258  	if b.typ != memProfile {
   259  		throw("bad use of bucket.mp")
   260  	}
   261  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   262  	return (*memRecord)(data)
   263  }
   264  
   265  // bp returns the blockRecord associated with the blockProfile bucket b.
   266  func (b *bucket) bp() *blockRecord {
   267  	if b.typ != blockProfile && b.typ != mutexProfile {
   268  		throw("bad use of bucket.bp")
   269  	}
   270  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   271  	return (*blockRecord)(data)
   272  }
   273  
   274  // stkbucket returns the bucket for stk[0:nstk], allocating a new bucket if needed.
   275  func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
   276  	bh := (*buckhashArray)(buckhash.Load())
   277  	if bh == nil {
   278  		lock(&profInsertLock)
   279  		// check again under the lock
   280  		bh = (*buckhashArray)(buckhash.Load())
   281  		if bh == nil {
   282  			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
   283  			if bh == nil {
   284  				throw("runtime: cannot allocate memory")
   285  			}
   286  			buckhash.StoreNoWB(unsafe.Pointer(bh))
   287  		}
   288  		unlock(&profInsertLock)
   289  	}
   290  
   291  	// Hash stack.
   292  	var h uintptr
   293  	for _, pc := range stk {
   294  		h += pc
   295  		h += h << 10
   296  		h ^= h >> 6
   297  	}
   298  	// hash in size
   299  	h += size
   300  	h += h << 10
   301  	h ^= h >> 6
   302  	// finalize
   303  	h += h << 3
   304  	h ^= h >> 11
   305  
   306  	i := int(h % buckHashSize)
   307  	// first check optimistically, without the lock
   308  	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
   309  		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
   310  			return b
   311  		}
   312  	}
   313  
   314  	if !alloc {
   315  		return nil
   316  	}
   317  
   318  	lock(&profInsertLock)
   319  	// check again under the insertion lock
   320  	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
   321  		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
   322  			unlock(&profInsertLock)
   323  			return b
   324  		}
   325  	}
   326  
   327  	// Create new bucket.
   328  	b := newBucket(typ, len(stk))
   329  	copy(b.stk(), stk)
   330  	b.hash = h
   331  	b.size = size
   332  
   333  	var allnext *atomic.UnsafePointer
   334  	if typ == memProfile {
   335  		allnext = &mbuckets
   336  	} else if typ == mutexProfile {
   337  		allnext = &xbuckets
   338  	} else {
   339  		allnext = &bbuckets
   340  	}
   341  
   342  	b.next = (*bucket)(bh[i].Load())
   343  	b.allnext = (*bucket)(allnext.Load())
   344  
   345  	bh[i].StoreNoWB(unsafe.Pointer(b))
   346  	allnext.StoreNoWB(unsafe.Pointer(b))
   347  
   348  	unlock(&profInsertLock)
   349  	return b
   350  }
   351  
   352  func eqslice(x, y []uintptr) bool {
   353  	if len(x) != len(y) {
   354  		return false
   355  	}
   356  	for i, xi := range x {
   357  		if xi != y[i] {
   358  			return false
   359  		}
   360  	}
   361  	return true
   362  }
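
        // The hashing in stkbucket is a Jenkins-style "one-at-a-time" mix over the
        // stack PCs plus the size. An equivalent standalone version, for reference
        // (hashStack is a made-up name, not a runtime function):
        //
        //	func hashStack(stk []uintptr, size uintptr) uintptr {
        //		var h uintptr
        //		for _, pc := range stk {
        //			h += pc
        //			h += h << 10
        //			h ^= h >> 6
        //		}
        //		h += size
        //		h += h << 10
        //		h ^= h >> 6
        //		h += h << 3
        //		h ^= h >> 11
        //		return h
        //	}
        //
        // The bucket then lives in the hash chain at index h % buckHashSize.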
   363  
   364  // mProf_NextCycle publishes the next heap profile cycle and creates a
   365  // fresh heap profile cycle. This operation is fast and can be done
   366  // during STW. The caller must call mProf_Flush before calling
   367  // mProf_NextCycle again.
   368  //
   369  // This is called by mark termination during STW so allocations and
   370  // frees after the world is started again count towards a new heap
   371  // profiling cycle.
   372  func mProf_NextCycle() {
   373  	mProfCycle.increment()
   374  }
   375  
   376  // mProf_Flush flushes the events from the current heap profiling
   377  // cycle into the active profile. After this it is safe to start a new
   378  // heap profiling cycle with mProf_NextCycle.
   379  //
   380  // This is called by GC after mark termination starts the world. In
   381  // contrast with mProf_NextCycle, this is somewhat expensive, but safe
   382  // to do concurrently.
   383  func mProf_Flush() {
   384  	cycle, alreadyFlushed := mProfCycle.setFlushed()
   385  	if alreadyFlushed {
   386  		return
   387  	}
   388  
   389  	index := cycle % uint32(len(memRecord{}.future))
   390  	lock(&profMemActiveLock)
   391  	lock(&profMemFutureLock[index])
   392  	mProf_FlushLocked(index)
   393  	unlock(&profMemFutureLock[index])
   394  	unlock(&profMemActiveLock)
   395  }
   396  
   397  // mProf_FlushLocked flushes the events from the heap profiling cycle at index
   398  // into the active profile. The caller must hold the lock for the active profile
   399  // (profMemActiveLock) and for the profiling cycle at index
   400  // (profMemFutureLock[index]).
   401  func mProf_FlushLocked(index uint32) {
   402  	assertLockHeld(&profMemActiveLock)
   403  	assertLockHeld(&profMemFutureLock[index])
   404  	head := (*bucket)(mbuckets.Load())
   405  	for b := head; b != nil; b = b.allnext {
   406  		mp := b.mp()
   407  
   408  		// Flush cycle C into the published profile and clear
   409  		// it for reuse.
   410  		mpc := &mp.future[index]
   411  		mp.active.add(mpc)
   412  		*mpc = memRecordCycle{}
   413  	}
   414  }
   415  
   416  // mProf_PostSweep records that all sweep frees for this GC cycle have
   417  // completed. This has the effect of publishing the heap profile
   418  // snapshot as of the last mark termination without advancing the heap
   419  // profile cycle.
   420  func mProf_PostSweep() {
   421  	// Flush cycle C+1 to the active profile so everything as of
   422  	// the last mark termination becomes visible. *Don't* advance
   423  	// the cycle, since we're still accumulating allocs in cycle
   424  	// C+2, which have to become C+1 in the next mark termination
   425  	// and so on.
   426  	cycle := mProfCycle.read() + 1
   427  
   428  	index := cycle % uint32(len(memRecord{}.future))
   429  	lock(&profMemActiveLock)
   430  	lock(&profMemFutureLock[index])
   431  	mProf_FlushLocked(index)
   432  	unlock(&profMemFutureLock[index])
   433  	unlock(&profMemActiveLock)
   434  }
   435  
   436  // mProf_Malloc is called by malloc to record a profiled block.
   437  func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
   438  	if mp.profStack == nil {
   439  		// mp.profStack is nil if we happen to sample an allocation during the
   440  		// initialization of mp. This case is rare, so we just ignore such
   441  		// allocations. Change MemProfileRate to 1 if you need to reproduce such
   442  		// cases for testing purposes.
   443  		return
   444  	}
   445  	// Only use the part of mp.profStack we need and ignore the extra space
   446  	// reserved for delayed inline expansion with frame pointer unwinding.
   447  	nstk := callers(5, mp.profStack[:debug.profstackdepth])
   448  	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
   449  
   450  	b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
   451  	mr := b.mp()
   452  	mpc := &mr.future[index]
   453  
   454  	lock(&profMemFutureLock[index])
   455  	mpc.allocs++
   456  	mpc.alloc_bytes += size
   457  	unlock(&profMemFutureLock[index])
   458  
   459  	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of
   460  	// the profiler locks. This reduces potential contention and chances of
   461  	// deadlocks. Since the object must be alive during the call to
   462  	// mProf_Malloc, it's fine to do this non-atomically.
   463  	systemstack(func() {
   464  		setprofilebucket(p, b)
   465  	})
   466  }
   467  
   468  // mProf_Free is called when a profiled block is freed.
   469  func mProf_Free(b *bucket, size uintptr) {
   470  	index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
   471  
   472  	mp := b.mp()
   473  	mpc := &mp.future[index]
   474  
   475  	lock(&profMemFutureLock[index])
   476  	mpc.frees++
   477  	mpc.free_bytes += size
   478  	unlock(&profMemFutureLock[index])
   479  }
   480  
   481  var blockprofilerate uint64 // in CPU ticks
   482  
   483  // SetBlockProfileRate controls the fraction of goroutine blocking events
   484  // that are reported in the blocking profile. The profiler aims to sample
   485  // an average of one blocking event per rate nanoseconds spent blocked.
   486  //
   487  // To include every blocking event in the profile, pass rate = 1.
   488  // To turn off profiling entirely, pass rate <= 0.
   489  func SetBlockProfileRate(rate int) {
   490  	var r int64
   491  	if rate <= 0 {
   492  		r = 0 // disable profiling
   493  	} else if rate == 1 {
   494  		r = 1 // profile everything
   495  	} else {
   496  		// convert ns to cycles, use float64 to prevent overflow during multiplication
   497  		r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000))
   498  		if r == 0 {
   499  			r = 1
   500  		}
   501  	}
   502  
   503  	atomic.Store64(&blockprofilerate, uint64(r))
   504  }
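
        // A hedged usage sketch (external to this file, using only exported APIs):
        // a program that wants every blocking event in its block profile might do
        // something like the following.
        //
        //	package main
        //
        //	import (
        //		"os"
        //		"runtime"
        //		"runtime/pprof"
        //	)
        //
        //	func main() {
        //		runtime.SetBlockProfileRate(1) // sample every blocking event
        //		defer runtime.SetBlockProfileRate(0)
        //
        //		// ... run the workload of interest ...
        //
        //		pprof.Lookup("block").WriteTo(os.Stdout, 1)
        //	}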
   505  
   506  func blockevent(cycles int64, skip int) {
   507  	if cycles <= 0 {
   508  		cycles = 1
   509  	}
   510  
   511  	rate := int64(atomic.Load64(&blockprofilerate))
   512  	if blocksampled(cycles, rate) {
   513  		saveblockevent(cycles, rate, skip+1, blockProfile)
   514  	}
   515  }
   516  
   517  // blocksampled returns true for all events where cycles >= rate. Shorter
   518  // events have a cycles/rate random chance of returning true.
   519  func blocksampled(cycles, rate int64) bool {
   520  	if rate <= 0 || (rate > cycles && cheaprand64()%rate > cycles) {
   521  		return false
   522  	}
   523  	return true
   524  }
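
        // A rough worked example of the sampling probability: with rate = 1,000,000
        // ticks and cycles = 250,000 ticks, cheaprand64()%rate falls at or below
        // cycles about cycles/rate = 25% of the time, so roughly one in four such
        // events is kept. saveBlockEventStack below compensates for the dropped
        // events, so the expected contribution of each event is unbiased:
        //
        //	keep probability ≈ cycles/rate             // from blocksampled
        //	count added       = rate/cycles            // from saveBlockEventStack
        //	cycles added      = rate
        //	E[count]  ≈ (cycles/rate) * (rate/cycles) = 1
        //	E[cycles] ≈ (cycles/rate) * rate          = cycles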
   525  
   526  // saveblockevent records a profile event of the type specified by which.
   527  // cycles is the quantity associated with this event and rate is the sampling rate,
   528  // used to adjust the cycles value in the manner determined by the profile type.
   529  // skip is the number of frames to omit from the traceback associated with the event.
   530  // The traceback will be recorded from the stack of the goroutine associated with the current m.
   531  // skip should be positive if this event is recorded from the current stack
   532  // (e.g. when this is not called from a system stack).
   533  func saveblockevent(cycles, rate int64, skip int, which bucketType) {
   534  	if debug.profstackdepth == 0 {
   535  		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
   536  		// can't record a stack trace.
   537  		return
   538  	}
   539  	if skip > maxSkip {
   540  		print("requested skip=", skip)
   541  		throw("invalid skip value")
   542  	}
   543  	gp := getg()
   544  	mp := acquirem() // we must not be preempted while accessing profstack
   545  
   546  	var nstk int
   547  	if tracefpunwindoff() || gp.m.hasCgoOnStack() {
   548  		if gp.m.curg == nil || gp.m.curg == gp {
   549  			nstk = callers(skip, mp.profStack)
   550  		} else {
   551  			nstk = gcallers(gp.m.curg, skip, mp.profStack)
   552  		}
   553  	} else {
   554  		if gp.m.curg == nil || gp.m.curg == gp {
   555  			if skip > 0 {
   556  				// We skip one fewer frame than the provided value for frame
   557  				// pointer unwinding because the skip value includes the current
   558  				// frame, whereas the saved frame pointer will give us the
   559  				// caller's return address first (so, not including
   560  				// saveblockevent)
   561  				skip -= 1
   562  			}
   563  			nstk = fpTracebackPartialExpand(skip, unsafe.Pointer(getfp()), mp.profStack)
   564  		} else {
   565  			mp.profStack[0] = gp.m.curg.sched.pc
   566  			nstk = 1 + fpTracebackPartialExpand(skip, unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[1:])
   567  		}
   568  	}
   569  
   570  	saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
   571  	releasem(mp)
   572  }
   573  
   574  // fpTracebackPartialExpand records a call stack obtained starting from fp.
   575  // This function will skip the given number of frames, properly accounting for
   576  // inlining, and save remaining frames as "physical" return addresses. The
   577  // consumer should later use CallersFrames or similar to expand inline frames.
   578  func fpTracebackPartialExpand(skip int, fp unsafe.Pointer, pcBuf []uintptr) int {
   579  	var n int
   580  	lastFuncID := abi.FuncIDNormal
   581  	skipOrAdd := func(retPC uintptr) bool {
   582  		if skip > 0 {
   583  			skip--
   584  		} else if n < len(pcBuf) {
   585  			pcBuf[n] = retPC
   586  			n++
   587  		}
   588  		return n < len(pcBuf)
   589  	}
   590  	for n < len(pcBuf) && fp != nil {
   591  		// return addr sits one word above the frame pointer
   592  		pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
   593  
   594  		if skip > 0 {
   595  			callPC := pc - 1
   596  			fi := findfunc(callPC)
   597  			u, uf := newInlineUnwinder(fi, callPC)
   598  			for ; uf.valid(); uf = u.next(uf) {
   599  				sf := u.srcFunc(uf)
   600  				if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
   601  					// ignore wrappers
   602  				} else if more := skipOrAdd(uf.pc + 1); !more {
   603  					return n
   604  				}
   605  				lastFuncID = sf.funcID
   606  			}
   607  		} else {
   608  			// We've skipped the desired number of frames, so no need
   609  			// to perform further inline expansion now.
   610  			pcBuf[n] = pc
   611  			n++
   612  		}
   613  
   614  		// follow the frame pointer to the next one
   615  		fp = unsafe.Pointer(*(*uintptr)(fp))
   616  	}
   617  	return n
   618  }
   619  
   620  // lockTimer assists with profiling contention on runtime-internal locks.
   621  //
   622  // There are several steps between the time that an M experiences contention and
   623  // when that contention may be added to the profile. This comes from our
   624  // constraints: We need to keep the critical section of each lock small,
   625  // especially when those locks are contended. The reporting code cannot acquire
   626  // new locks until the M has released all other locks, which means no memory
   627  // allocations and encourages use of (temporary) M-local storage.
   628  //
   629  // The M will have space for storing one call stack that caused contention, and
   630  // for the magnitude of that contention. It will also have space to store the
   631  // magnitude of additional contention the M caused, since it only has space to
   632  // remember one call stack and might encounter several contention events before
   633  // it releases all of its locks and is thus able to transfer the local buffer
   634  // into the profile.
   635  //
   636  // The M will collect the call stack when it unlocks the contended lock. That
   637  // minimizes the impact on the critical section of the contended lock, and
   638  // matches the mutex profile's behavior for contention in sync.Mutex: measured
   639  // at the Unlock method.
   640  //
   641  // The profile for contention on sync.Mutex blames the caller of Unlock for the
   642  // amount of contention experienced by the callers of Lock which had to wait.
   643  // When there are several critical sections, this allows identifying which of
   644  // them is responsible.
   645  //
   646  // Matching that behavior for runtime-internal locks will require identifying
   647  // which Ms are blocked on the mutex. The semaphore-based implementation is
   648  // ready to allow that, but the futex-based implementation will require a bit
   649  // more work. Until then, we report contention on runtime-internal locks with a
   650  // call stack taken from the unlock call (like the rest of the user-space
   651  // "mutex" profile), but assign it a duration value based on how long the
   652  // previous lock call took (like the user-space "block" profile).
   653  //
   654  // Thus, reporting the call stacks of runtime-internal lock contention is
   655  // guarded by GODEBUG for now. Set GODEBUG=runtimecontentionstacks=1 to enable.
   656  //
   657  // TODO(rhysh): plumb through the delay duration, remove GODEBUG, update comment
   658  //
   659  // The M will track this by storing a pointer to the lock; lock/unlock pairs for
   660  // runtime-internal locks are always on the same M.
   661  //
   662  // Together, that demands several steps for recording contention. First, when
   663  // finally acquiring a contended lock, the M decides whether it should plan to
   664  // profile that event by storing a pointer to the lock in its "to be profiled
   665  // upon unlock" field. If that field is already set, it uses the relative
   666  // magnitudes to weight a random choice between itself and the other lock, with
   667  // the loser's time being added to the "additional contention" field. Otherwise
   668  // if the M's call stack buffer is occupied, it does the comparison against that
   669  // sample's magnitude.
   670  //
   671  // Second, having unlocked a mutex the M checks to see if it should capture the
   672  // call stack into its local buffer. Finally, when the M unlocks its last mutex,
   673  // it transfers the local buffer into the profile. As part of that step, it also
   674  // transfers any "additional contention" time to the profile. Any lock
   675  // contention that it experiences while adding samples to the profile will be
   676  // recorded later as "additional contention" and not include a call stack, to
   677  // avoid an echo.
   678  type lockTimer struct {
   679  	lock      *mutex
   680  	timeRate  int64
   681  	timeStart int64
   682  	tickStart int64
   683  }
   684  
   685  func (lt *lockTimer) begin() {
   686  	rate := int64(atomic.Load64(&mutexprofilerate))
   687  
   688  	lt.timeRate = gTrackingPeriod
   689  	if rate != 0 && rate < lt.timeRate {
   690  		lt.timeRate = rate
   691  	}
   692  	if int64(cheaprand())%lt.timeRate == 0 {
   693  		lt.timeStart = nanotime()
   694  	}
   695  
   696  	if rate > 0 && int64(cheaprand())%rate == 0 {
   697  		lt.tickStart = cputicks()
   698  	}
   699  }
   700  
   701  func (lt *lockTimer) end() {
   702  	gp := getg()
   703  
   704  	if lt.timeStart != 0 {
   705  		nowTime := nanotime()
   706  		gp.m.mLockProfile.waitTime.Add((nowTime - lt.timeStart) * lt.timeRate)
   707  	}
   708  
   709  	if lt.tickStart != 0 {
   710  		nowTick := cputicks()
   711  		gp.m.mLockProfile.recordLock(nowTick-lt.tickStart, lt.lock)
   712  	}
   713  }
   714  
   715  type mLockProfile struct {
   716  	waitTime   atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
   717  	stack      []uintptr    // stack that experienced contention in runtime.lockWithRank
   718  	pending    uintptr      // *mutex that experienced contention (to be traceback-ed)
   719  	cycles     int64        // cycles attributable to "pending" (if set), otherwise to "stack"
   720  	cyclesLost int64        // contention for which we weren't able to record a call stack
   721  	haveStack  bool         // stack and cycles are to be added to the mutex profile
   722  	disabled   bool         // attribute all time to "lost"
   723  }
   724  
   725  func (prof *mLockProfile) recordLock(cycles int64, l *mutex) {
   726  	if cycles < 0 {
   727  		cycles = 0
   728  	}
   729  
   730  	if prof.disabled {
   731  		// We're experiencing contention while attempting to report contention.
   732  		// Make a note of its magnitude, but don't allow it to be the sole cause
   733  		// of another contention report.
   734  		prof.cyclesLost += cycles
   735  		return
   736  	}
   737  
   738  	if uintptr(unsafe.Pointer(l)) == prof.pending {
   739  		// Optimization: we'd already planned to profile this same lock (though
   740  		// possibly from a different unlock site).
   741  		prof.cycles += cycles
   742  		return
   743  	}
   744  
   745  	if prev := prof.cycles; prev > 0 {
   746  		// We can only store one call stack for runtime-internal lock contention
   747  		// on this M, and we've already got one. Decide which should stay, and
   748  		// add the other to the report for runtime._LostContendedRuntimeLock.
   749  		if cycles == 0 {
   750  			return
   751  		}
   752  		prevScore := uint64(cheaprand64()) % uint64(prev)
   753  		thisScore := uint64(cheaprand64()) % uint64(cycles)
   754  		if prevScore > thisScore {
   755  			prof.cyclesLost += cycles
   756  			return
   757  		} else {
   758  			prof.cyclesLost += prev
   759  		}
   760  	}
   761  	// Saving the *mutex as a uintptr is safe because:
   762  	//  - lockrank_on.go does this too, which gives it regular exercise
   763  	//  - the lock would only move if it's stack allocated, which means it
   764  	//      cannot experience multi-M contention
   765  	prof.pending = uintptr(unsafe.Pointer(l))
   766  	prof.cycles = cycles
   767  }
   768  
   769  // From unlock2, we might not be holding a p in this code.
   770  //
   771  //go:nowritebarrierrec
   772  func (prof *mLockProfile) recordUnlock(l *mutex) {
   773  	if uintptr(unsafe.Pointer(l)) == prof.pending {
   774  		prof.captureStack()
   775  	}
   776  	if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.haveStack {
   777  		prof.store()
   778  	}
   779  }
   780  
   781  func (prof *mLockProfile) captureStack() {
   782  	if debug.profstackdepth == 0 {
   783  		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
   784  		// can't record a stack trace.
   785  		return
   786  	}
   787  
   788  	skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank
   789  	if staticLockRanking {
   790  		// When static lock ranking is enabled, we'll always be on the system
   791  		// stack at this point. There will be a runtime.unlockWithRank.func1
   792  		// frame, and if the call to runtime.unlock took place on a user stack
   793  		// then there'll also be a runtime.systemstack frame. To keep stack
   794  		// traces somewhat consistent whether or not static lock ranking is
   795  		// enabled, we'd like to skip those. But it's hard to tell how long
   796  		// we've been on the system stack so accept an extra frame in that case,
   797  		// with a leaf of "runtime.unlockWithRank runtime.unlock" instead of
   798  		// "runtime.unlock".
   799  		skip += 1 // runtime.unlockWithRank.func1
   800  	}
   801  	prof.pending = 0
   802  	prof.haveStack = true
   803  
   804  	prof.stack[0] = logicalStackSentinel
   805  	if debug.runtimeContentionStacks.Load() == 0 {
   806  		prof.stack[1] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum
   807  		prof.stack[2] = 0
   808  		return
   809  	}
   810  
   811  	var nstk int
   812  	gp := getg()
   813  	sp := sys.GetCallerSP()
   814  	pc := sys.GetCallerPC()
   815  	systemstack(func() {
   816  		var u unwinder
   817  		u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
   818  		nstk = 1 + tracebackPCs(&u, skip, prof.stack[1:])
   819  	})
   820  	if nstk < len(prof.stack) {
   821  		prof.stack[nstk] = 0
   822  	}
   823  }
   824  
   825  func (prof *mLockProfile) store() {
   826  	// Report any contention we experience within this function as "lost"; it's
   827  	// important that the act of reporting a contention event not lead to a
   828  	// reportable contention event. This also means we can use prof.stack
   829  	// without copying, since it won't change during this function.
   830  	mp := acquirem()
   831  	prof.disabled = true
   832  
   833  	nstk := int(debug.profstackdepth)
   834  	for i := 0; i < nstk; i++ {
   835  		if pc := prof.stack[i]; pc == 0 {
   836  			nstk = i
   837  			break
   838  		}
   839  	}
   840  
   841  	cycles, lost := prof.cycles, prof.cyclesLost
   842  	prof.cycles, prof.cyclesLost = 0, 0
   843  	prof.haveStack = false
   844  
   845  	rate := int64(atomic.Load64(&mutexprofilerate))
   846  	saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
   847  	if lost > 0 {
   848  		lostStk := [...]uintptr{
   849  			logicalStackSentinel,
   850  			abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
   851  		}
   852  		saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
   853  	}
   854  
   855  	prof.disabled = false
   856  	releasem(mp)
   857  }
   858  
   859  func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) {
   860  	b := stkbucket(which, 0, stk, true)
   861  	bp := b.bp()
   862  
   863  	lock(&profBlockLock)
   864  	// We want to up-scale the count and cycles according to the
   865  	// probability that the event was sampled. For block profile events,
   866  	// the sample probability is 1 if cycles >= rate, and cycles / rate
   867  	// otherwise. For mutex profile events, the sample probability is 1 / rate.
   868  	// We scale the events by 1 / (probability the event was sampled).
   869  	if which == blockProfile && cycles < rate {
   870  		// Remove sampling bias, see discussion on http://golang.org/cl/299991.
   871  		bp.count += float64(rate) / float64(cycles)
   872  		bp.cycles += rate
   873  	} else if which == mutexProfile {
   874  		bp.count += float64(rate)
   875  		bp.cycles += rate * cycles
   876  	} else {
   877  		bp.count++
   878  		bp.cycles += cycles
   879  	}
   880  	unlock(&profBlockLock)
   881  }
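
        // For the mutexProfile branch, SetMutexProfileFraction(rate) keeps roughly
        // one event in rate, so each recorded sample stands in for about rate
        // events. A quick numeric check of the scaling above (illustrative values
        // only):
        //
        //	rate   = 5         // 1 in 5 contention events sampled
        //	cycles = 1000      // contention observed by the sampled event
        //	count  += 5        // the sample represents ~5 events
        //	cycles += 5 * 1000 // and ~5000 cycles of contention in total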
   882  
   883  var mutexprofilerate uint64 // fraction sampled
   884  
   885  // SetMutexProfileFraction controls the fraction of mutex contention events
   886  // that are reported in the mutex profile. On average 1/rate events are
   887  // reported. The previous rate is returned.
   888  //
   889  // To turn off profiling entirely, pass rate 0.
   890  // To just read the current rate, pass rate < 0.
   891  // (For rate > 1, the details of sampling may change.)
   892  func SetMutexProfileFraction(rate int) int {
   893  	if rate < 0 {
   894  		return int(mutexprofilerate)
   895  	}
   896  	old := mutexprofilerate
   897  	atomic.Store64(&mutexprofilerate, uint64(rate))
   898  	return int(old)
   899  }
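
        // A hedged usage sketch (external code, exported APIs only), mirroring the
        // block-profile example above:
        //
        //	prev := runtime.SetMutexProfileFraction(5) // sample ~1 in 5 contention events
        //	defer runtime.SetMutexProfileFraction(prev)
        //
        //	// ... run the workload of interest ...
        //
        //	pprof.Lookup("mutex").WriteTo(os.Stdout, 1)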
   900  
   901  //go:linkname mutexevent sync.event
   902  func mutexevent(cycles int64, skip int) {
   903  	if cycles < 0 {
   904  		cycles = 0
   905  	}
   906  	rate := int64(atomic.Load64(&mutexprofilerate))
   907  	if rate > 0 && cheaprand64()%rate == 0 {
   908  		saveblockevent(cycles, rate, skip+1, mutexProfile)
   909  	}
   910  }
   911  
   912  // Go interface to profile data.
   913  
   914  // A StackRecord describes a single execution stack.
   915  type StackRecord struct {
   916  	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
   917  }
   918  
   919  // Stack returns the stack trace associated with the record,
   920  // a prefix of r.Stack0.
   921  func (r *StackRecord) Stack() []uintptr {
   922  	for i, v := range r.Stack0 {
   923  		if v == 0 {
   924  			return r.Stack0[0:i]
   925  		}
   926  	}
   927  	return r.Stack0[0:]
   928  }
   929  
   930  // MemProfileRate controls the fraction of memory allocations
   931  // that are recorded and reported in the memory profile.
   932  // The profiler aims to sample an average of
   933  // one allocation per MemProfileRate bytes allocated.
   934  //
   935  // To include every allocated block in the profile, set MemProfileRate to 1.
   936  // To turn off profiling entirely, set MemProfileRate to 0.
   937  //
   938  // The tools that process the memory profiles assume that the
   939  // profile rate is constant across the lifetime of the program
   940  // and equal to the current value. Programs that change the
   941  // memory profiling rate should do so just once, as early as
   942  // possible in the execution of the program (for example,
   943  // at the beginning of main).
   944  var MemProfileRate int = 512 * 1024
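
        // A minimal sketch of changing the rate once, early in the program
        // (external code; the 16 KiB value is only an example):
        //
        //	package main
        //
        //	import "runtime"
        //
        //	func init() {
        //		// Sample roughly one allocation per 16 KiB allocated.
        //		runtime.MemProfileRate = 16 * 1024
        //	}
        //
        //	func main() { /* ... */ }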
   945  
   946  // disableMemoryProfiling is set by the linker if memory profiling
   947  // is not used and the link type guarantees nobody else could use it
   948  // elsewhere.
   949  // We check if the runtime.memProfileInternal symbol is present.
   950  var disableMemoryProfiling bool
   951  
   952  // A MemProfileRecord describes the live objects allocated
   953  // by a particular call sequence (stack trace).
   954  type MemProfileRecord struct {
   955  	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
   956  	AllocObjects, FreeObjects int64       // number of objects allocated, freed
   957  	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
   958  }
   959  
   960  // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
   961  func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
   962  
   963  // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
   964  func (r *MemProfileRecord) InUseObjects() int64 {
   965  	return r.AllocObjects - r.FreeObjects
   966  }
   967  
   968  // Stack returns the stack trace associated with the record,
   969  // a prefix of r.Stack0.
   970  func (r *MemProfileRecord) Stack() []uintptr {
   971  	for i, v := range r.Stack0 {
   972  		if v == 0 {
   973  			return r.Stack0[0:i]
   974  		}
   975  	}
   976  	return r.Stack0[0:]
   977  }
   978  
   979  // MemProfile returns a profile of memory allocated and freed per allocation
   980  // site.
   981  //
   982  // MemProfile returns n, the number of records in the current memory profile.
   983  // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
   984  // If len(p) < n, MemProfile does not change p and returns n, false.
   985  //
   986  // If inuseZero is true, the profile includes allocation records
   987  // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
   988  // These are sites where memory was allocated, but it has all
   989  // been released back to the runtime.
   990  //
   991  // The returned profile may be up to two garbage collection cycles old.
   992  // This is to avoid skewing the profile toward allocations; because
   993  // allocations happen in real time but frees are delayed until the garbage
   994  // collector performs sweeping, the profile only accounts for allocations
   995  // that have had a chance to be freed by the garbage collector.
   996  //
   997  // Most clients should use the runtime/pprof package or
   998  // the testing package's -test.memprofile flag instead
   999  // of calling MemProfile directly.
  1000  func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
  1001  	return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
  1002  		copyMemProfileRecord(&p[0], r)
  1003  		p = p[1:]
  1004  	})
  1005  }
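
        // Callers that do use MemProfile directly typically follow the pattern the
        // doc comment implies: ask for the record count, then retry with a slightly
        // larger slice until the copy succeeds. A hedged sketch (external code;
        // readMemProfile is a made-up helper name):
        //
        //	func readMemProfile() []runtime.MemProfileRecord {
        //		n, _ := runtime.MemProfile(nil, true)
        //		for {
        //			p := make([]runtime.MemProfileRecord, n+50)
        //			m, ok := runtime.MemProfile(p, true)
        //			if ok {
        //				return p[:m]
        //			}
        //			n = m // profile grew; retry with a bigger slice
        //		}
        //	}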
  1006  
  1007  // memProfileInternal returns the number of records n in the profile. If there
  1008  // are no more than size records, copyFn is invoked for each record, and ok returns
  1009  // true.
  1010  //
  1011  // The linker sets disableMemoryProfiling to true to disable memory profiling
  1012  // if this function is not reachable. Mark it noinline to ensure the symbol exists.
  1013  // (This function is big and normally not inlined anyway.)
  1014  // See also disableMemoryProfiling above and cmd/link/internal/ld/lib.go:linksetup.
  1015  //
  1016  //go:noinline
  1017  func memProfileInternal(size int, inuseZero bool, copyFn func(profilerecord.MemProfileRecord)) (n int, ok bool) {
  1018  	cycle := mProfCycle.read()
  1019  	// If we're between mProf_NextCycle and mProf_Flush, take care
  1020  	// of flushing to the active profile so we only have to look
  1021  	// at the active profile below.
  1022  	index := cycle % uint32(len(memRecord{}.future))
  1023  	lock(&profMemActiveLock)
  1024  	lock(&profMemFutureLock[index])
  1025  	mProf_FlushLocked(index)
  1026  	unlock(&profMemFutureLock[index])
  1027  	clear := true
  1028  	head := (*bucket)(mbuckets.Load())
  1029  	for b := head; b != nil; b = b.allnext {
  1030  		mp := b.mp()
  1031  		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
  1032  			n++
  1033  		}
  1034  		if mp.active.allocs != 0 || mp.active.frees != 0 {
  1035  			clear = false
  1036  		}
  1037  	}
  1038  	if clear {
  1039  		// Absolutely no data, suggesting that a garbage collection
  1040  		// has not yet happened. In order to allow profiling when
  1041  		// garbage collection is disabled from the beginning of execution,
  1042  		// accumulate all of the cycles, and recount buckets.
  1043  		n = 0
  1044  		for b := head; b != nil; b = b.allnext {
  1045  			mp := b.mp()
  1046  			for c := range mp.future {
  1047  				lock(&profMemFutureLock[c])
  1048  				mp.active.add(&mp.future[c])
  1049  				mp.future[c] = memRecordCycle{}
  1050  				unlock(&profMemFutureLock[c])
  1051  			}
  1052  			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
  1053  				n++
  1054  			}
  1055  		}
  1056  	}
  1057  	if n <= size {
  1058  		ok = true
  1059  		for b := head; b != nil; b = b.allnext {
  1060  			mp := b.mp()
  1061  			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
  1062  				r := profilerecord.MemProfileRecord{
  1063  					AllocBytes:   int64(mp.active.alloc_bytes),
  1064  					FreeBytes:    int64(mp.active.free_bytes),
  1065  					AllocObjects: int64(mp.active.allocs),
  1066  					FreeObjects:  int64(mp.active.frees),
  1067  					Stack:        b.stk(),
  1068  				}
  1069  				copyFn(r)
  1070  			}
  1071  		}
  1072  	}
  1073  	unlock(&profMemActiveLock)
  1074  	return
  1075  }
  1076  
  1077  func copyMemProfileRecord(dst *MemProfileRecord, src profilerecord.MemProfileRecord) {
  1078  	dst.AllocBytes = src.AllocBytes
  1079  	dst.FreeBytes = src.FreeBytes
  1080  	dst.AllocObjects = src.AllocObjects
  1081  	dst.FreeObjects = src.FreeObjects
  1082  	if raceenabled {
  1083  		racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(MemProfile))
  1084  	}
  1085  	if msanenabled {
  1086  		msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1087  	}
  1088  	if asanenabled {
  1089  		asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1090  	}
  1091  	i := copy(dst.Stack0[:], src.Stack)
  1092  	clear(dst.Stack0[i:])
  1093  }
  1094  
  1095  //go:linkname pprof_memProfileInternal
  1096  func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool) {
  1097  	return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
  1098  		p[0] = r
  1099  		p = p[1:]
  1100  	})
  1101  }
  1102  
  1103  func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
  1104  	lock(&profMemActiveLock)
  1105  	head := (*bucket)(mbuckets.Load())
  1106  	for b := head; b != nil; b = b.allnext {
  1107  		mp := b.mp()
  1108  		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
  1109  	}
  1110  	unlock(&profMemActiveLock)
  1111  }
  1112  
  1113  // BlockProfileRecord describes blocking events originating
  1114  // at a particular call sequence (stack trace).
  1115  type BlockProfileRecord struct {
  1116  	Count  int64
  1117  	Cycles int64
  1118  	StackRecord
  1119  }
  1120  
  1121  // BlockProfile returns n, the number of records in the current blocking profile.
  1122  // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
  1123  // If len(p) < n, BlockProfile does not change p and returns n, false.
  1124  //
  1125  // Most clients should use the [runtime/pprof] package or
  1126  // the [testing] package's -test.blockprofile flag instead
  1127  // of calling BlockProfile directly.
  1128  func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
  1129  	var m int
  1130  	n, ok = blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1131  		copyBlockProfileRecord(&p[m], r)
  1132  		m++
  1133  	})
  1134  	if ok {
  1135  		expandFrames(p[:n])
  1136  	}
  1137  	return
  1138  }
  1139  
  1140  func expandFrames(p []BlockProfileRecord) {
  1141  	expandedStack := makeProfStack()
  1142  	for i := range p {
  1143  		cf := CallersFrames(p[i].Stack())
  1144  		j := 0
  1145  		for j < len(expandedStack) {
  1146  			f, more := cf.Next()
  1147  			// f.PC is a "call PC", but later consumers will expect
  1148  			// "return PCs"
  1149  			expandedStack[j] = f.PC + 1
  1150  			j++
  1151  			if !more {
  1152  				break
  1153  			}
  1154  		}
  1155  		k := copy(p[i].Stack0[:], expandedStack[:j])
  1156  		clear(p[i].Stack0[k:])
  1157  	}
  1158  }
  1159  
  1160  // blockProfileInternal returns the number of records n in the profile. If there
  1161  // are no more than size records, copyFn is invoked for each record, and ok returns
  1162  // true.
  1163  func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
  1164  	lock(&profBlockLock)
  1165  	head := (*bucket)(bbuckets.Load())
  1166  	for b := head; b != nil; b = b.allnext {
  1167  		n++
  1168  	}
  1169  	if n <= size {
  1170  		ok = true
  1171  		for b := head; b != nil; b = b.allnext {
  1172  			bp := b.bp()
  1173  			r := profilerecord.BlockProfileRecord{
  1174  				Count:  int64(bp.count),
  1175  				Cycles: bp.cycles,
  1176  				Stack:  b.stk(),
  1177  			}
  1178  			// Prevent callers from having to worry about division by zero errors.
  1179  			// See discussion on http://golang.org/cl/299991.
  1180  			if r.Count == 0 {
  1181  				r.Count = 1
  1182  			}
  1183  			copyFn(r)
  1184  		}
  1185  	}
  1186  	unlock(&profBlockLock)
  1187  	return
  1188  }
  1189  
  1190  // copyBlockProfileRecord copies the sample values and call stack from src to dst.
  1191  // The call stack is copied as-is. The caller is responsible for handling inline
  1192  // expansion, needed when the call stack was collected with frame pointer unwinding.
  1193  func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord) {
  1194  	dst.Count = src.Count
  1195  	dst.Cycles = src.Cycles
  1196  	if raceenabled {
  1197  		racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(BlockProfile))
  1198  	}
  1199  	if msanenabled {
  1200  		msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1201  	}
  1202  	if asanenabled {
  1203  		asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1204  	}
  1205  	// We just copy the stack here without inline expansion
  1206  	// (needed if frame pointer unwinding is used)
  1207  	// since this function is called under the profile lock,
  1208  	// and doing something that might allocate can violate lock ordering.
  1209  	i := copy(dst.Stack0[:], src.Stack)
  1210  	clear(dst.Stack0[i:])
  1211  }
  1212  
  1213  //go:linkname pprof_blockProfileInternal
  1214  func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
  1215  	return blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1216  		p[0] = r
  1217  		p = p[1:]
  1218  	})
  1219  }
  1220  
  1221  // MutexProfile returns n, the number of records in the current mutex profile.
  1222  // If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
  1223  // Otherwise, MutexProfile does not change p, and returns n, false.
  1224  //
  1225  // Most clients should use the [runtime/pprof] package
  1226  // instead of calling MutexProfile directly.
  1227  func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
  1228  	var m int
  1229  	n, ok = mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1230  		copyBlockProfileRecord(&p[m], r)
  1231  		m++
  1232  	})
  1233  	if ok {
  1234  		expandFrames(p[:n])
  1235  	}
  1236  	return
  1237  }
  1238  
  1239  // mutexProfileInternal returns the number of records n in the profile. If there
  1240  // are no more than size records, copyFn is invoked for each record, and ok returns
  1241  // true.
  1242  func mutexProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
  1243  	lock(&profBlockLock)
  1244  	head := (*bucket)(xbuckets.Load())
  1245  	for b := head; b != nil; b = b.allnext {
  1246  		n++
  1247  	}
  1248  	if n <= size {
  1249  		ok = true
  1250  		for b := head; b != nil; b = b.allnext {
  1251  			bp := b.bp()
  1252  			r := profilerecord.BlockProfileRecord{
  1253  				Count:  int64(bp.count),
  1254  				Cycles: bp.cycles,
  1255  				Stack:  b.stk(),
  1256  			}
  1257  			copyFn(r)
  1258  		}
  1259  	}
  1260  	unlock(&profBlockLock)
  1261  	return
  1262  }
  1263  
  1264  //go:linkname pprof_mutexProfileInternal
  1265  func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
  1266  	return mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1267  		p[0] = r
  1268  		p = p[1:]
  1269  	})
  1270  }
  1271  
  1272  // ThreadCreateProfile returns n, the number of records in the thread creation profile.
  1273  // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
  1274  // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
  1275  //
  1276  // Most clients should use the runtime/pprof package instead
  1277  // of calling ThreadCreateProfile directly.
  1278  func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
  1279  	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
  1280  		i := copy(p[0].Stack0[:], r.Stack)
  1281  		clear(p[0].Stack0[i:])
  1282  		p = p[1:]
  1283  	})
  1284  }
  1285  
  1286  // threadCreateProfileInternal returns the number of records n in the profile.
  1287  // If there are no more than size records, copyFn is invoked for each record, and
  1288  // ok returns true.
  1289  func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool) {
  1290  	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
  1291  	for mp := first; mp != nil; mp = mp.alllink {
  1292  		n++
  1293  	}
  1294  	if n <= size {
  1295  		ok = true
  1296  		for mp := first; mp != nil; mp = mp.alllink {
  1297  			r := profilerecord.StackRecord{Stack: mp.createstack[:]}
  1298  			copyFn(r)
  1299  		}
  1300  	}
  1301  	return
  1302  }
  1303  
  1304  //go:linkname pprof_threadCreateInternal
  1305  func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool) {
  1306  	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
  1307  		p[0] = r
  1308  		p = p[1:]
  1309  	})
  1310  }
  1311  
  1312  //go:linkname pprof_goroutineProfileWithLabels
  1313  func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1314  	return goroutineProfileWithLabels(p, labels)
  1315  }
  1316  
  1317  // labels may be nil. If labels is non-nil, it must have the same length as p.
  1318  func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1319  	if labels != nil && len(labels) != len(p) {
  1320  		labels = nil
  1321  	}
  1322  
  1323  	return goroutineProfileWithLabelsConcurrent(p, labels)
  1324  }
  1325  
  1326  var goroutineProfile = struct {
  1327  	sema    uint32
  1328  	active  bool
  1329  	offset  atomic.Int64
  1330  	records []profilerecord.StackRecord
  1331  	labels  []unsafe.Pointer
  1332  }{
  1333  	sema: 1,
  1334  }
  1335  
  1336  // goroutineProfileState indicates the status of a goroutine's stack for the
  1337  // current in-progress goroutine profile. Goroutines' stacks are initially
  1338  // "Absent" from the profile, and end up "Satisfied" by the time the profile is
  1339  // complete. While a goroutine's stack is being captured, its
  1340  // goroutineProfileState will be "InProgress" and it will not be able to run
  1341  // until the capture completes and the state moves to "Satisfied".
  1342  //
  1343  // Some goroutines (the finalizer goroutine, which at various times can be
  1344  // either a "system" or a "user" goroutine; the goroutine that is
  1345  // coordinating the profile; and any goroutines created during the profile) move
  1346  // directly to the "Satisfied" state.
  1347  type goroutineProfileState uint32
  1348  
  1349  const (
  1350  	goroutineProfileAbsent goroutineProfileState = iota
  1351  	goroutineProfileInProgress
  1352  	goroutineProfileSatisfied
  1353  )
  1354  
  1355  type goroutineProfileStateHolder atomic.Uint32
  1356  
  1357  func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
  1358  	return goroutineProfileState((*atomic.Uint32)(p).Load())
  1359  }
  1360  
  1361  func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
  1362  	(*atomic.Uint32)(p).Store(uint32(value))
  1363  }
  1364  
  1365  func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
  1366  	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
  1367  }
  1368  
  1369  func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1370  	if len(p) == 0 {
  1371  		// An empty slice is obviously too small. Return a rough
  1372  		// allocation estimate without bothering to STW. As long as
  1373  		// this is close, then we'll only need to STW once (on the next
  1374  		// call).
  1375  		return int(gcount()), false
  1376  	}
  1377  
  1378  	semacquire(&goroutineProfile.sema)
  1379  
  1380  	ourg := getg()
  1381  
  1382  	pcbuf := makeProfStack() // see saveg() for explanation
  1383  	stw := stopTheWorld(stwGoroutineProfile)
  1384  	// Using gcount while the world is stopped should give us a consistent view
  1385  	// of the number of live goroutines, minus the number of goroutines that are
  1386  	// alive and permanently marked as "system". But to make this count agree
  1387  	// with what we'd get from isSystemGoroutine, we need special handling for
  1388  	// goroutines that can vary between user and system to ensure that the count
  1389  	// doesn't change during the collection. So, check the finalizer goroutine
  1390  	// in particular.
  1391  	n = int(gcount())
  1392  	if fingStatus.Load()&fingRunningFinalizer != 0 {
  1393  		n++
  1394  	}
  1395  
  1396  	if n > len(p) {
  1397  		// There's not enough space in p to store the whole profile, so (per the
  1398  		// contract of runtime.GoroutineProfile) we're not allowed to write to p
  1399  		// at all and must return n, false.
  1400  		startTheWorld(stw)
  1401  		semrelease(&goroutineProfile.sema)
  1402  		return n, false
  1403  	}
  1404  
  1405  	// Save current goroutine.
  1406  	sp := sys.GetCallerSP()
  1407  	pc := sys.GetCallerPC()
  1408  	systemstack(func() {
  1409  		saveg(pc, sp, ourg, &p[0], pcbuf)
  1410  	})
  1411  	if labels != nil {
  1412  		labels[0] = ourg.labels
  1413  	}
  1414  	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
  1415  	goroutineProfile.offset.Store(1)
  1416  
  1417  	// Prepare for all other goroutines to enter the profile. Aside from ourg,
  1418  	// every goroutine struct in the allgs list has its goroutineProfiled field
  1419  	// cleared. Any goroutine created from this point on (while
  1420  	// goroutineProfile.active is set) will start with its goroutineProfiled
  1421  	// field set to goroutineProfileSatisfied.
  1422  	goroutineProfile.active = true
  1423  	goroutineProfile.records = p
  1424  	goroutineProfile.labels = labels
  1425  	// The finalizer goroutine needs special handling because it can vary over
  1426  	// time between being a user goroutine (eligible for this profile) and a
  1427  	// system goroutine (to be excluded). Pick one before restarting the world.
  1428  	if fing != nil {
  1429  		fing.goroutineProfiled.Store(goroutineProfileSatisfied)
  1430  		if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
  1431  			doRecordGoroutineProfile(fing, pcbuf)
  1432  		}
  1433  	}
  1434  	startTheWorld(stw)
  1435  
  1436  	// Visit each goroutine that existed as of the startTheWorld call above.
  1437  	//
  1438  	// New goroutines may not be in this list, but we didn't want to know about
  1439  	// them anyway. If they do appear in this list (via reusing a dead goroutine
  1440  	// struct, or racing to launch between the world restarting and us getting
  1441  	// the list), they will already have their goroutineProfiled field set to
  1442  	// goroutineProfileSatisfied before their state transitions out of _Gdead.
  1443  	//
  1444  	// Any goroutine that the scheduler tries to execute concurrently with this
  1445  	// call will start by adding itself to the profile (before the act of
  1446  	// executing can cause any changes in its stack).
  1447  	forEachGRace(func(gp1 *g) {
  1448  		tryRecordGoroutineProfile(gp1, pcbuf, Gosched)
  1449  	})
  1450  
  1451  	stw = stopTheWorld(stwGoroutineProfileCleanup)
  1452  	endOffset := goroutineProfile.offset.Swap(0)
  1453  	goroutineProfile.active = false
  1454  	goroutineProfile.records = nil
  1455  	goroutineProfile.labels = nil
  1456  	startTheWorld(stw)
  1457  
  1458  	// Restore the invariant that every goroutine struct in allgs has its
  1459  	// goroutineProfiled field cleared.
  1460  	forEachGRace(func(gp1 *g) {
  1461  		gp1.goroutineProfiled.Store(goroutineProfileAbsent)
  1462  	})
  1463  
  1464  	if raceenabled {
  1465  		raceacquire(unsafe.Pointer(&labelSync))
  1466  	}
  1467  
  1468  	if n != int(endOffset) {
  1469  		// It's a big surprise that the number of goroutines changed while we
  1470  		// were collecting the profile. But probably better to return a
  1471  		// truncated profile than to crash the whole process.
  1472  		//
  1473  		// For instance, needm moves a goroutine out of the _Gdead state and so
  1474  		// might be able to change the goroutine count without interacting with
  1475  		// the scheduler. For code like that, the race windows are small and the
  1476  		// combination of features is uncommon, so it's hard to be (and remain)
  1477  		// sure we've caught them all.
  1478  	}
  1479  
  1480  	semrelease(&goroutineProfile.sema)
  1481  	return n, true
  1482  }
  1483  
  1484  // tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
  1485  // tryRecordGoroutineProfile.
  1486  //
  1487  //go:yeswritebarrierrec
  1488  func tryRecordGoroutineProfileWB(gp1 *g) {
  1489  	if getg().m.p.ptr() == nil {
  1490  		throw("no P available, write barriers are forbidden")
  1491  	}
  1492  	tryRecordGoroutineProfile(gp1, nil, osyield)
  1493  }
  1494  
  1495  // tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
  1496  // in the current goroutine profile: either that it should not be profiled, or
  1497  // that a snapshot of its call stack and labels is now in the profile.
  1498  func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
  1499  	if readgstatus(gp1) == _Gdead {
  1500  		// Dead goroutines should not appear in the profile. Goroutines that
  1501  		// start while profile collection is active will get goroutineProfiled
  1502  		// set to goroutineProfileSatisfied before transitioning out of _Gdead,
  1503  		// so here we check _Gdead first.
  1504  		return
  1505  	}
  1506  	if isSystemGoroutine(gp1, true) {
  1507  		// System goroutines should not appear in the profile. (The finalizer
  1508  		// goroutine is marked as "already profiled".)
  1509  		return
  1510  	}
  1511  
  1512  	for {
  1513  		prev := gp1.goroutineProfiled.Load()
  1514  		if prev == goroutineProfileSatisfied {
  1515  			// This goroutine is already in the profile (or is new since the
  1516  			// start of collection, so shouldn't appear in the profile).
  1517  			break
  1518  		}
  1519  		if prev == goroutineProfileInProgress {
  1520  			// Something else is adding gp1 to the goroutine profile right now.
  1521  			// Give that a moment to finish.
  1522  			yield()
  1523  			continue
  1524  		}
  1525  
  1526  		// While we have gp1.goroutineProfiled set to
  1527  		// goroutineProfileInProgress, gp1 may appear _Grunnable but will not
  1528  		// actually be able to run. Disable preemption for ourselves, to make
  1529  		// sure we finish profiling gp1 right away instead of leaving it stuck
  1530  		// in this limbo.
  1531  		mp := acquirem()
  1532  		if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
  1533  			doRecordGoroutineProfile(gp1, pcbuf)
  1534  			gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
  1535  		}
  1536  		releasem(mp)
  1537  	}
  1538  }
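
        // Illustrative sketch (not part of this file): the Absent -> InProgress ->
        // Satisfied protocol above is an instance of a general "claim with CAS, do
        // the work, then publish" pattern. A hypothetical, simplified version using
        // sync/atomic (recordOnce, work, and yield are made-up names, and the
        // preemption handling above is deliberately omitted):
        //
        //	const (
        //		absent uint32 = iota
        //		inProgress
        //		satisfied
        //	)
        //
        //	// recordOnce runs work exactly once for this state, spinning via
        //	// yield while another goroutine holds the in-progress claim.
        //	func recordOnce(state *atomic.Uint32, work, yield func()) {
        //		for {
        //			switch state.Load() {
        //			case satisfied:
        //				return // already done by someone
        //			case inProgress:
        //				yield() // another goroutine is mid-claim; wait
        //				continue
        //			}
        //			if state.CompareAndSwap(absent, inProgress) {
        //				work()
        //				state.Store(satisfied)
        //				return
        //			}
        //			// Lost the race to claim; loop and re-check the state.
        //		}
        //	}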
  1539  
  1540  // doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
  1541  // goroutine profile. Preemption is disabled.
  1542  //
  1543  // This may be called via tryRecordGoroutineProfile in two ways: by the
  1544  // goroutine that is coordinating the goroutine profile (running on its own
  1545  // stack), or from the scheduler in preparation to execute gp1 (running on the
  1546  // system stack).
  1547  func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
  1548  	if readgstatus(gp1) == _Grunning {
  1549  		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
  1550  		throw("cannot read stack of running goroutine")
  1551  	}
  1552  
  1553  	offset := int(goroutineProfile.offset.Add(1)) - 1
  1554  
  1555  	if offset >= len(goroutineProfile.records) {
  1556  		// Should be impossible, but better to return a truncated profile than
  1557  		// to crash the entire process at this point. Instead, deal with it in
  1558  		// goroutineProfileWithLabelsConcurrent where we have more context.
  1559  		return
  1560  	}
  1561  
  1562  	// saveg calls gentraceback, which may call cgo traceback functions. When
  1563  	// called from the scheduler, this is on the system stack already so
  1564  	// traceback.go:cgoContextPCs will avoid calling back into the scheduler.
  1565  	//
  1566  	// When called from the goroutine coordinating the profile, we have already
  1567  	// set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
  1568  	// preventing gp1 from becoming truly _Grunnable. So we'll use the system
  1569  	// stack to avoid scheduling delays.
  1570  	systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset], pcbuf) })
  1571  
  1572  	if goroutineProfile.labels != nil {
  1573  		goroutineProfile.labels[offset] = gp1.labels
  1574  	}
  1575  }
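
        // Illustrative sketch (not part of this file): the offset.Add(1) - 1 above
        // is a lock-free way to claim a unique slot in a shared, pre-sized slice.
        // A hypothetical stand-alone version of the same idea, using sync/atomic's
        // Int64 (claimSlot and its parameters are made-up names):
        //
        //	// claimSlot reserves the next index of results for the caller and
        //	// writes v there. It returns -1 when results is full, mirroring the
        //	// truncation check above.
        //	func claimSlot(next *atomic.Int64, results []string, v string) int {
        //		i := int(next.Add(1)) - 1 // no two callers observe the same i
        //		if i >= len(results) {
        //			return -1 // out of space; caller handles the truncation
        //		}
        //		results[i] = v
        //		return i
        //	}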
  1576  
  1577  func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1578  	gp := getg()
  1579  
  1580  	isOK := func(gp1 *g) bool {
  1581  		// Checking isSystemGoroutine here makes GoroutineProfile
  1582  		// consistent with both NumGoroutine and Stack.
  1583  		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
  1584  	}
  1585  
  1586  	pcbuf := makeProfStack() // see saveg() for explanation
  1587  	stw := stopTheWorld(stwGoroutineProfile)
  1588  
  1589  	// World is stopped, no locking required.
  1590  	n = 1
  1591  	forEachGRace(func(gp1 *g) {
  1592  		if isOK(gp1) {
  1593  			n++
  1594  		}
  1595  	})
  1596  
  1597  	if n <= len(p) {
  1598  		ok = true
  1599  		r, lbl := p, labels
  1600  
  1601  		// Save current goroutine.
  1602  		sp := sys.GetCallerSP()
  1603  		pc := sys.GetCallerPC()
  1604  		systemstack(func() {
  1605  			saveg(pc, sp, gp, &r[0], pcbuf)
  1606  		})
  1607  		r = r[1:]
  1608  
  1609  		// If we have a place to put our goroutine label map, insert it there.
  1610  		if labels != nil {
  1611  			lbl[0] = gp.labels
  1612  			lbl = lbl[1:]
  1613  		}
  1614  
  1615  		// Save other goroutines.
  1616  		forEachGRace(func(gp1 *g) {
  1617  			if !isOK(gp1) {
  1618  				return
  1619  			}
  1620  
  1621  			if len(r) == 0 {
  1622  				// Should be impossible, but better to return a
  1623  				// truncated profile than to crash the entire process.
  1624  				return
  1625  			}
  1626  			// saveg calls gentraceback, which may call cgo traceback functions.
  1627  			// The world is stopped, so it cannot use cgocall (which will be
  1628  			// blocked at exitsyscall). Do it on the system stack so it won't
  1629  			// call into the scheduler (see traceback.go:cgoContextPCs).
  1630  			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0], pcbuf) })
  1631  			if labels != nil {
  1632  				lbl[0] = gp1.labels
  1633  				lbl = lbl[1:]
  1634  			}
  1635  			r = r[1:]
  1636  		})
  1637  	}
  1638  
  1639  	if raceenabled {
  1640  		raceacquire(unsafe.Pointer(&labelSync))
  1641  	}
  1642  
  1643  	startTheWorld(stw)
  1644  	return n, ok
  1645  }
  1646  
  1647  // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
  1648  // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
  1649  // If len(p) < n, GoroutineProfile does not change p and returns n, false.
  1650  //
  1651  // Most clients should use the [runtime/pprof] package instead
  1652  // of calling GoroutineProfile directly.
  1653  func GoroutineProfile(p []StackRecord) (n int, ok bool) {
  1654  	records := make([]profilerecord.StackRecord, len(p))
  1655  	n, ok = goroutineProfileInternal(records)
  1656  	if !ok {
  1657  		return
  1658  	}
  1659  	for i, mr := range records[0:n] {
  1660  		l := copy(p[i].Stack0[:], mr.Stack)
  1661  		clear(p[i].Stack0[l:])
  1662  	}
  1663  	return
  1664  }
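
        // Illustrative usage sketch (not part of this file): because of the n/ok
        // contract above, callers outside the runtime typically size the slice from
        // NumGoroutine, add some slack, and retry while GoroutineProfile reports
        // that the slice is still too small:
        //
        //	records := make([]runtime.StackRecord, runtime.NumGoroutine()+10)
        //	for {
        //		n, ok := runtime.GoroutineProfile(records)
        //		if ok {
        //			records = records[:n]
        //			break
        //		}
        //		// Too small: n is the required length; grow with slack and retry.
        //		records = make([]runtime.StackRecord, n+10)
        //	}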
  1665  
  1666  func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool) {
  1667  	return goroutineProfileWithLabels(p, nil)
  1668  }
  1669  
  1670  func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
  1671  	// To reduce memory usage, we want to allocate a r.Stack that is just big
  1672  	// enough to hold gp's stack trace. Naively we might achieve this by
  1673  	// recording our stack trace into mp.profStack, and then allocating a
  1674  	// r.Stack of the right size. However, mp.profStack is also used for
  1675  	// allocation profiling, so it could get overwritten if the slice allocation
  1676  	// gets profiled. So instead we record the stack trace into a temporary
  1677  	// pcbuf which is usually given to us by our caller. When it's not, we have
  1678  	// to allocate one here. This will only happen for goroutines that were in a
  1679  	// syscall when the goroutine profile started or for goroutines that manage
  1680  	// to execute before we finish iterating over all the goroutines.
  1681  	if pcbuf == nil {
  1682  		pcbuf = makeProfStack()
  1683  	}
  1684  
  1685  	var u unwinder
  1686  	u.initAt(pc, sp, 0, gp, unwindSilentErrors)
  1687  	n := tracebackPCs(&u, 0, pcbuf)
  1688  	r.Stack = make([]uintptr, n)
  1689  	copy(r.Stack, pcbuf)
  1690  }
  1691  
  1692  // Stack formats a stack trace of the calling goroutine into buf
  1693  // and returns the number of bytes written to buf.
  1694  // If all is true, Stack formats stack traces of all other goroutines
  1695  // into buf after the trace for the current goroutine.
  1696  func Stack(buf []byte, all bool) int {
  1697  	var stw worldStop
  1698  	if all {
  1699  		stw = stopTheWorld(stwAllGoroutinesStack)
  1700  	}
  1701  
  1702  	n := 0
  1703  	if len(buf) > 0 {
  1704  		gp := getg()
  1705  		sp := sys.GetCallerSP()
  1706  		pc := sys.GetCallerPC()
  1707  		systemstack(func() {
  1708  			g0 := getg()
  1709  			// Force traceback=1 to override GOTRACEBACK setting,
  1710  			// so that Stack's results are consistent.
  1711  			// GOTRACEBACK is only about crash dumps.
  1712  			g0.m.traceback = 1
  1713  			g0.writebuf = buf[0:0:len(buf)]
  1714  			goroutineheader(gp)
  1715  			traceback(pc, sp, 0, gp)
  1716  			if all {
  1717  				tracebackothers(gp)
  1718  			}
  1719  			g0.m.traceback = 0
  1720  			n = len(g0.writebuf)
  1721  			g0.writebuf = nil
  1722  		})
  1723  	}
  1724  
  1725  	if all {
  1726  		startTheWorld(stw)
  1727  	}
  1728  	return n
  1729  }
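
        // Illustrative usage sketch (not part of this file): Stack truncates its
        // output when buf is too small, so callers that want a complete dump of all
        // goroutines commonly grow the buffer until the result no longer fills it:
        //
        //	buf := make([]byte, 1<<16)
        //	for {
        //		n := runtime.Stack(buf, true)
        //		if n < len(buf) {
        //			buf = buf[:n]
        //			break
        //		}
        //		// n == len(buf): output may have been truncated; retry bigger.
        //		buf = make([]byte, 2*len(buf))
        //	}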
  1730  
