Source file src/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goarch"
    12  	"internal/goos"
    13  	"internal/runtime/atomic"
    14  	"internal/runtime/gc"
    15  	"internal/runtime/sys"
    16  	"unsafe"
    17  )
    18  
    19  var Fadd64 = fadd64
    20  var Fsub64 = fsub64
    21  var Fmul64 = fmul64
    22  var Fdiv64 = fdiv64
    23  var F64to32 = f64to32
    24  var F32to64 = f32to64
    25  var Fcmp64 = fcmp64
    26  var Fintto64 = fintto64
    27  var F64toint = f64toint
    28  
    29  var Entersyscall = entersyscall
    30  var Exitsyscall = exitsyscall
    31  var LockedOSThread = lockedOSThread
    32  var Xadduintptr = atomic.Xadduintptr
    33  
    34  var ReadRandomFailed = &readRandomFailed
    35  
    36  var Fastlog2 = fastlog2
    37  
    38  var ParseByteCount = parseByteCount
    39  
    40  var Nanotime = nanotime
    41  var Cputicks = cputicks
    42  var CyclesPerSecond = pprof_cyclesPerSecond
    43  var NetpollBreak = netpollBreak
    44  var Usleep = usleep
    45  
    46  var PhysPageSize = physPageSize
    47  var PhysHugePageSize = physHugePageSize
    48  
    49  var NetpollGenericInit = netpollGenericInit
    50  
    51  var Memmove = memmove
    52  var MemclrNoHeapPointers = memclrNoHeapPointers
    53  
    54  var CgoCheckPointer = cgoCheckPointer
    55  
    56  const CrashStackImplemented = crashStackImplemented
    57  
    58  const TracebackInnerFrames = tracebackInnerFrames
    59  const TracebackOuterFrames = tracebackOuterFrames
    60  
    61  var LockPartialOrder = lockPartialOrder
    62  
    63  type TimeTimer = timeTimer
    64  
    65  type LockRank lockRank
    66  
    67  func (l LockRank) String() string {
    68  	return lockRank(l).String()
    69  }
    70  
    71  const PreemptMSupported = preemptMSupported
    72  
    73  type LFNode struct {
    74  	Next    uint64
    75  	Pushcnt uintptr
    76  }
    77  
    78  func LFStackPush(head *uint64, node *LFNode) {
    79  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    80  }
    81  
    82  func LFStackPop(head *uint64) *LFNode {
    83  	return (*LFNode)((*lfstack)(head).pop())
    84  }
    85  func LFNodeValidate(node *LFNode) {
    86  	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
    87  }
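
// exampleLFStackRoundTrip is a hypothetical sketch (not a runtime API) of
// exercising the lfstack exports above: push one node and pop it back.
// lfstack nodes are expected to live outside the Go heap, so the sketch uses
// package-level storage rather than new().
var exampleLFStackNode LFNode

func exampleLFStackRoundTrip() {
	var head uint64
	node := &exampleLFStackNode
	LFNodeValidate(node) // rejects heap-allocated nodes
	LFStackPush(&head, node)
	if LFStackPop(&head) != node {
		throw("lfstack round trip returned a different node")
	}
}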
    88  
    89  func Netpoll(delta int64) {
    90  	systemstack(func() {
    91  		netpoll(delta)
    92  	})
    93  }
    94  
    95  func PointerMask(x any) (ret []byte) {
    96  	systemstack(func() {
    97  		ret = pointerMask(x)
    98  	})
    99  	return
   100  }
   101  
   102  func RunSchedLocalQueueTest() {
   103  	pp := new(p)
   104  	gs := make([]g, len(pp.runq))
   105  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   106  	for i := 0; i < len(pp.runq); i++ {
   107  		if g, _ := runqget(pp); g != nil {
   108  			throw("runq is not empty initially")
   109  		}
   110  		for j := 0; j < i; j++ {
   111  			runqput(pp, &gs[i], false)
   112  		}
   113  		for j := 0; j < i; j++ {
   114  			if g, _ := runqget(pp); g != &gs[i] {
   115  				print("bad element at iter ", i, "/", j, "\n")
   116  				throw("bad element")
   117  			}
   118  		}
   119  		if g, _ := runqget(pp); g != nil {
   120  			throw("runq is not empty afterwards")
   121  		}
   122  	}
   123  }
   124  
   125  func RunSchedLocalQueueStealTest() {
   126  	p1 := new(p)
   127  	p2 := new(p)
   128  	gs := make([]g, len(p1.runq))
   129  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   130  	for i := 0; i < len(p1.runq); i++ {
   131  		for j := 0; j < i; j++ {
   132  			gs[j].sig = 0
   133  			runqput(p1, &gs[j], false)
   134  		}
   135  		gp := runqsteal(p2, p1, true)
   136  		s := 0
   137  		if gp != nil {
   138  			s++
   139  			gp.sig++
   140  		}
   141  		for {
   142  			gp, _ = runqget(p2)
   143  			if gp == nil {
   144  				break
   145  			}
   146  			s++
   147  			gp.sig++
   148  		}
   149  		for {
   150  			gp, _ = runqget(p1)
   151  			if gp == nil {
   152  				break
   153  			}
   154  			gp.sig++
   155  		}
   156  		for j := 0; j < i; j++ {
   157  			if gs[j].sig != 1 {
   158  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   159  				throw("bad element")
   160  			}
   161  		}
   162  		if s != i/2 && s != i/2+1 {
   163  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   164  			throw("bad steal")
   165  		}
   166  	}
   167  }
   168  
   169  func RunSchedLocalQueueEmptyTest(iters int) {
   170  	// Test that runq is not spuriously reported as empty.
   171  	// Runq emptiness affects scheduling decisions and spurious emptiness
   172  	// can lead to underutilization (both runnable Gs and idle Ps coexist
   173  	// for an arbitrarily long time).
   174  	done := make(chan bool, 1)
   175  	p := new(p)
   176  	gs := make([]g, 2)
   177  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   178  	ready := new(uint32)
   179  	for i := 0; i < iters; i++ {
   180  		*ready = 0
   181  		next0 := (i & 1) == 0
   182  		next1 := (i & 2) == 0
   183  		runqput(p, &gs[0], next0)
   184  		go func() {
   185  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   186  			}
   187  			if runqempty(p) {
   188  				println("next:", next0, next1)
   189  				throw("queue is empty")
   190  			}
   191  			done <- true
   192  		}()
   193  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   194  		}
   195  		runqput(p, &gs[1], next1)
   196  		runqget(p)
   197  		<-done
   198  		runqget(p)
   199  	}
   200  }
   201  
   202  var (
   203  	StringHash = stringHash
   204  	BytesHash  = bytesHash
   205  	Int32Hash  = int32Hash
   206  	Int64Hash  = int64Hash
   207  	MemHash    = memhash
   208  	MemHash32  = memhash32
   209  	MemHash64  = memhash64
   210  	EfaceHash  = efaceHash
   211  	IfaceHash  = ifaceHash
   212  )
   213  
   214  var UseAeshash = &useAeshash
   215  
   216  func MemclrBytes(b []byte) {
   217  	s := (*slice)(unsafe.Pointer(&b))
   218  	memclrNoHeapPointers(s.array, uintptr(s.len))
   219  }
   220  
   221  const HashLoad = hashLoad
   222  
   223  // GostringW is an entry point for testing.
   224  func GostringW(w []uint16) (s string) {
   225  	systemstack(func() {
   226  		s = gostringw(&w[0])
   227  	})
   228  	return
   229  }
   230  
   231  var Open = open
   232  var Close = closefd
   233  var Read = read
   234  var Write = write
   235  
   236  func Envs() []string     { return envs }
   237  func SetEnvs(e []string) { envs = e }
   238  
   239  const PtrSize = goarch.PtrSize
   240  
   241  const ClobberdeadPtr = clobberdeadPtr
   242  
   243  func Clobberfree() bool {
   244  	return debug.clobberfree != 0
   245  }
   246  
   247  var ForceGCPeriod = &forcegcperiod
   248  
   249  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   250  // the "environment" traceback level, so later calls to
   251  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   252  func SetTracebackEnv(level string) {
   253  	setTraceback(level)
   254  	traceback_env = traceback_cache
   255  }
   256  
   257  var ReadUnaligned32 = readUnaligned32
   258  var ReadUnaligned64 = readUnaligned64
   259  
   260  func CountPagesInUse() (pagesInUse, counted uintptr) {
   261  	stw := stopTheWorld(stwForTestCountPagesInUse)
   262  
   263  	pagesInUse = mheap_.pagesInUse.Load()
   264  
   265  	for _, s := range mheap_.allspans {
   266  		if s.state.get() == mSpanInUse {
   267  			counted += s.npages
   268  		}
   269  	}
   270  
   271  	startTheWorld(stw)
   272  
   273  	return
   274  }
   275  
   276  func Fastrand() uint32          { return uint32(rand()) }
   277  func Fastrand64() uint64        { return rand() }
   278  func Fastrandn(n uint32) uint32 { return randn(n) }
   279  
   280  type ProfBuf profBuf
   281  
   282  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   283  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   284  }
   285  
   286  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   287  	(*profBuf)(p).write(tag, now, hdr, stk)
   288  }
   289  
   290  const (
   291  	ProfBufBlocking    = profBufBlocking
   292  	ProfBufNonBlocking = profBufNonBlocking
   293  )
   294  
   295  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   296  	return (*profBuf)(p).read(mode)
   297  }
   298  
   299  func (p *ProfBuf) Close() {
   300  	(*profBuf)(p).close()
   301  }
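
// exampleProfBufRoundTrip is a hypothetical sketch of the profile buffer
// wrapper's intended round trip: write one record (a header word plus a
// small stack) and read it back without blocking. Sizes and values are
// arbitrary illustrative choices.
func exampleProfBufRoundTrip() {
	b := NewProfBuf(1, 64, 4) // 1 header word, 64 data words, 4 tag slots
	var tag unsafe.Pointer
	b.Write(&tag, nanotime(), []uint64{42}, []uintptr{1, 2, 3})
	data, tags, eof := b.Read(ProfBufNonBlocking)
	_, _, _ = data, tags, eof
	b.Close()
}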
   302  
   303  type CPUStats = cpuStats
   304  
   305  func ReadCPUStats() CPUStats {
   306  	return work.cpuStats
   307  }
   308  
   309  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   310  	stw := stopTheWorld(stwForTestReadMetricsSlow)
   311  
   312  	// Initialize the metrics beforehand because this could
   313  	// allocate and skew the stats.
   314  	metricsLock()
   315  	initMetrics()
   316  
   317  	systemstack(func() {
   318  		// Donate the racectx to g0. readMetricsLocked calls into the race detector
   319  		// via map access.
   320  		getg().racectx = getg().m.curg.racectx
   321  
   322  		// Read the metrics once before in case it allocates and skews the metrics.
   323  		// readMetricsLocked is designed to only allocate the first time it is called
   324  		// with a given slice of samples. In effect, this extra read tests that this
   325  		// remains true, since otherwise the second readMetricsLocked below could
   326  		// allocate before it returns.
   327  		readMetricsLocked(samplesp, len, cap)
   328  
   329  		// Read memstats first. It's going to flush
   330  		// the mcaches which readMetrics does not do, so
   331  		// going the other way around may result in
   332  		// inconsistent statistics.
   333  		readmemstats_m(memStats)
   334  
   335  		// Read metrics again. We need to be sure we're on the
   336  		// system stack with readmemstats_m so that we don't call into
   337  		// the stack allocator and adjust metrics between there and here.
   338  		readMetricsLocked(samplesp, len, cap)
   339  
   340  		// Undo the donation.
   341  		getg().racectx = 0
   342  	})
   343  	metricsUnlock()
   344  
   345  	startTheWorld(stw)
   346  }
   347  
   348  var DoubleCheckReadMemStats = &doubleCheckReadMemStats
   349  
   350  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   351  // MemStats accumulated by scanning the heap.
   352  func ReadMemStatsSlow() (base, slow MemStats) {
   353  	stw := stopTheWorld(stwForTestReadMemStatsSlow)
   354  
   355  	// Run on the system stack to avoid stack growth allocation.
   356  	systemstack(func() {
   357  		// Make sure stats don't change.
   358  		getg().m.mallocing++
   359  
   360  		readmemstats_m(&base)
   361  
   362  		// Initialize slow from base and zero the fields we're
   363  		// recomputing.
   364  		slow = base
   365  		slow.Alloc = 0
   366  		slow.TotalAlloc = 0
   367  		slow.Mallocs = 0
   368  		slow.Frees = 0
   369  		slow.HeapReleased = 0
   370  		var bySize [gc.NumSizeClasses]struct {
   371  			Mallocs, Frees uint64
   372  		}
   373  
   374  		// Add up current allocations in spans.
   375  		for _, s := range mheap_.allspans {
   376  			if s.state.get() != mSpanInUse {
   377  				continue
   378  			}
   379  			if s.isUnusedUserArenaChunk() {
   380  				continue
   381  			}
   382  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   383  				slow.Mallocs++
   384  				slow.Alloc += uint64(s.elemsize)
   385  			} else {
   386  				slow.Mallocs += uint64(s.allocCount)
   387  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   388  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   389  			}
   390  		}
   391  
   392  		// Add in frees by just reading the stats for those directly.
   393  		var m heapStatsDelta
   394  		memstats.heapStats.unsafeRead(&m)
   395  
   396  		// Collect per-sizeclass free stats.
   397  		var smallFree uint64
   398  		for i := 0; i < gc.NumSizeClasses; i++ {
   399  			slow.Frees += m.smallFreeCount[i]
   400  			bySize[i].Frees += m.smallFreeCount[i]
   401  			bySize[i].Mallocs += m.smallFreeCount[i]
   402  			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
   403  		}
   404  		slow.Frees += m.tinyAllocCount + m.largeFreeCount
   405  		slow.Mallocs += slow.Frees
   406  
   407  		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
   408  
   409  		for i := range slow.BySize {
   410  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   411  			slow.BySize[i].Frees = bySize[i].Frees
   412  		}
   413  
   414  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   415  			chunk := mheap_.pages.tryChunkOf(i)
   416  			if chunk == nil {
   417  				continue
   418  			}
   419  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   420  			slow.HeapReleased += uint64(pg) * pageSize
   421  		}
   422  		for _, p := range allp {
   423  			// Only count scav bits for pages in the cache
   424  			pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
   425  			slow.HeapReleased += uint64(pg) * pageSize
   426  		}
   427  
   428  		getg().m.mallocing--
   429  	})
   430  
   431  	startTheWorld(stw)
   432  	return
   433  }
   434  
   435  // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
   436  // and verifies that unwinding the new stack doesn't crash, even if the old
   437  // stack has been freed or reused (simulated via poisoning).
   438  func ShrinkStackAndVerifyFramePointers() {
   439  	before := stackPoisonCopy
   440  	defer func() { stackPoisonCopy = before }()
   441  	stackPoisonCopy = 1
   442  
   443  	gp := getg()
   444  	systemstack(func() {
   445  		shrinkstack(gp)
   446  	})
   447  	// If our new stack contains frame pointers into the old stack, this will
   448  	// crash because the old stack has been poisoned.
   449  	FPCallers(make([]uintptr, 1024))
   450  }
   451  
   452  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   453  // stderr, and blocks in a stack containing
   454  // "runtime.blockOnSystemStackInternal".
   455  func BlockOnSystemStack() {
   456  	systemstack(blockOnSystemStackInternal)
   457  }
   458  
   459  func blockOnSystemStackInternal() {
   460  	print("x\n")
   461  	lock(&deadlock)
   462  	lock(&deadlock)
   463  }
   464  
   465  type RWMutex struct {
   466  	rw rwmutex
   467  }
   468  
   469  func (rw *RWMutex) Init() {
   470  	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
   471  }
   472  
   473  func (rw *RWMutex) RLock() {
   474  	rw.rw.rlock()
   475  }
   476  
   477  func (rw *RWMutex) RUnlock() {
   478  	rw.rw.runlock()
   479  }
   480  
   481  func (rw *RWMutex) Lock() {
   482  	rw.rw.lock()
   483  }
   484  
   485  func (rw *RWMutex) Unlock() {
   486  	rw.rw.unlock()
   487  }
   488  
   489  func LockOSCounts() (external, internal uint32) {
   490  	gp := getg()
   491  	if gp.m.lockedExt+gp.m.lockedInt == 0 {
   492  		if gp.lockedm != 0 {
   493  			panic("lockedm on non-locked goroutine")
   494  		}
   495  	} else {
   496  		if gp.lockedm == 0 {
   497  			panic("nil lockedm on locked goroutine")
   498  		}
   499  	}
   500  	return gp.m.lockedExt, gp.m.lockedInt
   501  }
   502  
   503  //go:noinline
   504  func TracebackSystemstack(stk []uintptr, i int) int {
   505  	if i == 0 {
   506  		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
   507  		var u unwinder
   508  		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
   509  		return tracebackPCs(&u, 0, stk)
   510  	}
   511  	n := 0
   512  	systemstack(func() {
   513  		n = TracebackSystemstack(stk, i-1)
   514  	})
   515  	return n
   516  }
   517  
   518  func KeepNArenaHints(n int) {
   519  	hint := mheap_.arenaHints
   520  	for i := 1; i < n; i++ {
   521  		hint = hint.next
   522  		if hint == nil {
   523  			return
   524  		}
   525  	}
   526  	hint.next = nil
   527  }
   528  
   529  // MapNextArenaHint reserves a page at the next arena growth hint,
   530  // preventing the arena from growing there, and returns the range of
   531  // addresses that are no longer viable.
   532  //
   533  // This may fail to reserve memory. If it fails, it still returns the
   534  // address range it attempted to reserve.
   535  func MapNextArenaHint() (start, end uintptr, ok bool) {
   536  	hint := mheap_.arenaHints
   537  	addr := hint.addr
   538  	if hint.down {
   539  		start, end = addr-heapArenaBytes, addr
   540  		addr -= physPageSize
   541  	} else {
   542  		start, end = addr, addr+heapArenaBytes
   543  	}
   544  	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
   545  	ok = (addr == uintptr(got))
   546  	if !ok {
   547  		// We were unable to get the requested reservation.
   548  		// Release what we did get and fail.
   549  		sysFreeOS(got, physPageSize)
   550  	}
   551  	return
   552  }
   553  
   554  func NextArenaHint() (uintptr, bool) {
   555  	if mheap_.arenaHints == nil {
   556  		return 0, false
   557  	}
   558  	return mheap_.arenaHints.addr, true
   559  }
   560  
   561  type G = g
   562  
   563  type Sudog = sudog
   564  
   565  type XRegPerG = xRegPerG
   566  
   567  func Getg() *G {
   568  	return getg()
   569  }
   570  
   571  func Goid() uint64 {
   572  	return getg().goid
   573  }
   574  
   575  func GIsWaitingOnMutex(gp *G) bool {
   576  	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
   577  }
   578  
   579  var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
   580  
   581  //go:noinline
   582  func PanicForTesting(b []byte, i int) byte {
   583  	return unexportedPanicForTesting(b, i)
   584  }
   585  
   586  //go:noinline
   587  func unexportedPanicForTesting(b []byte, i int) byte {
   588  	return b[i]
   589  }
   590  
   591  func G0StackOverflow() {
   592  	systemstack(func() {
   593  		g0 := getg()
   594  		sp := sys.GetCallerSP()
   595  		// The stack bounds for the g0 stack are not always precise.
   596  		// Use an artificially small stack to trigger a stack overflow
   597  		// without actually running out of the system stack (which may segfault).
   598  		g0.stack.lo = sp - 4096 - stackSystem
   599  		g0.stackguard0 = g0.stack.lo + stackGuard
   600  		g0.stackguard1 = g0.stackguard0
   601  
   602  		stackOverflow(nil)
   603  	})
   604  }
   605  
   606  func stackOverflow(x *byte) {
   607  	var buf [256]byte
   608  	stackOverflow(&buf[0])
   609  }
   610  
   611  func RunGetgThreadSwitchTest() {
   612  	// Test that getg works correctly with thread switch.
   613  	// With gccgo, if we generate getg inlined, the backend
   614  	// may cache the address of the TLS variable, which
   615  	// will become invalid after a thread switch. This test
   616  	// checks that the bad caching doesn't happen.
   617  
   618  	ch := make(chan int)
   619  	go func(ch chan int) {
   620  		ch <- 5
   621  		LockOSThread()
   622  	}(ch)
   623  
   624  	g1 := getg()
   625  
   626  	// Block on a receive. This is likely to get us a thread
   627  	// switch. If we yield to the sender goroutine, it will
   628  	// lock the thread, forcing us to resume on a different
   629  	// thread.
   630  	<-ch
   631  
   632  	g2 := getg()
   633  	if g1 != g2 {
   634  		panic("g1 != g2")
   635  	}
   636  
   637  	// Also test getg after some control flow, as the
   638  	// backend is sensitive to control flow.
   639  	g3 := getg()
   640  	if g1 != g3 {
   641  		panic("g1 != g3")
   642  	}
   643  }
   644  
   645  // Expose freegc for testing.
   646  func Freegc(p unsafe.Pointer, size uintptr, noscan bool) {
   647  	freegc(p, size, noscan)
   648  }
   649  
   650  // Expose gcAssistBytes for the current g for testing.
   651  func AssistCredit() int64 {
   652  	assistG := getg()
   653  	if assistG.m.curg != nil {
   654  		assistG = assistG.m.curg
   655  	}
   656  	return assistG.gcAssistBytes
   657  }
   658  
   659  // Expose gcBlackenEnabled for testing.
   660  func GcBlackenEnable() bool {
   661  	// Note we do a non-atomic load here.
   662  	// Some checks against gcBlackenEnabled (e.g., in mallocgc)
   663  	// are currently done via non-atomic load for performance reasons,
   664  	// but other checks are done via atomic load (e.g., in mgcmark.go),
   665  	// so interpreting this value in a test may be subtle.
   666  	return gcBlackenEnabled != 0
   667  }
   668  
   669  const SizeSpecializedMallocEnabled = sizeSpecializedMallocEnabled
   670  
   671  const RuntimeFreegcEnabled = runtimeFreegcEnabled
   672  
   673  const (
   674  	PageSize         = pageSize
   675  	PallocChunkPages = pallocChunkPages
   676  	PageAlloc64Bit   = pageAlloc64Bit
   677  	PallocSumBytes   = pallocSumBytes
   678  )
   679  
   680  // Expose pallocSum for testing.
   681  type PallocSum pallocSum
   682  
   683  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   684  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   685  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   686  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
   687  
   688  // Expose pallocBits for testing.
   689  type PallocBits pallocBits
   690  
   691  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   692  	return (*pallocBits)(b).find(npages, searchIdx)
   693  }
   694  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   695  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   696  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   697  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   698  
   699  // SummarizeSlow is a slow but more obviously correct implementation
   700  // of (*pallocBits).summarize. Used for testing.
   701  func SummarizeSlow(b *PallocBits) PallocSum {
   702  	var start, most, end uint
   703  
   704  	const N = uint(len(b)) * 64
   705  	for start < N && (*pageBits)(b).get(start) == 0 {
   706  		start++
   707  	}
   708  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   709  		end++
   710  	}
   711  	run := uint(0)
   712  	for i := uint(0); i < N; i++ {
   713  		if (*pageBits)(b).get(i) == 0 {
   714  			run++
   715  		} else {
   716  			run = 0
   717  		}
   718  		most = max(most, run)
   719  	}
   720  	return PackPallocSum(start, most, end)
   721  }
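
// exampleCheckSummarize is a hypothetical sketch of how the slow reference
// implementation is meant to be used: cross-check the optimized summary
// against SummarizeSlow on the same bitmap.
func exampleCheckSummarize(b *PallocBits) bool {
	return b.Summarize() == SummarizeSlow(b)
}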
   722  
   723  // Expose non-trivial helpers for testing.
   724  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   725  
   726  // Given two PallocBits, returns a set of bit ranges where
   727  // they differ.
   728  func DiffPallocBits(a, b *PallocBits) []BitRange {
   729  	ba := (*pageBits)(a)
   730  	bb := (*pageBits)(b)
   731  
   732  	var d []BitRange
   733  	base, size := uint(0), uint(0)
   734  	for i := uint(0); i < uint(len(ba))*64; i++ {
   735  		if ba.get(i) != bb.get(i) {
   736  			if size == 0 {
   737  				base = i
   738  			}
   739  			size++
   740  		} else {
   741  			if size != 0 {
   742  				d = append(d, BitRange{base, size})
   743  			}
   744  			size = 0
   745  		}
   746  	}
   747  	if size != 0 {
   748  		d = append(d, BitRange{base, size})
   749  	}
   750  	return d
   751  }
   752  
   753  // StringifyPallocBits gets the bits in the bit range r from b,
   754  // and returns a string containing the bits as ASCII 0 and 1
   755  // characters.
   756  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   757  	str := ""
   758  	for j := r.I; j < r.I+r.N; j++ {
   759  		if (*pageBits)(b).get(j) != 0 {
   760  			str += "1"
   761  		} else {
   762  			str += "0"
   763  		}
   764  	}
   765  	return str
   766  }
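
// exampleDumpPallocBitsDiff is a hypothetical sketch composing the two
// helpers above the way a test failure message might: print only the
// windows where two bitmaps disagree.
func exampleDumpPallocBitsDiff(got, want *PallocBits) {
	for _, r := range DiffPallocBits(got, want) {
		print("got  ", StringifyPallocBits(got, r), "\n")
		print("want ", StringifyPallocBits(want, r), "\n")
	}
}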
   767  
   768  // Expose pallocData for testing.
   769  type PallocData pallocData
   770  
   771  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   772  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   773  }
   774  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   775  func (d *PallocData) ScavengedSetRange(i, n uint) {
   776  	(*pallocData)(d).scavenged.setRange(i, n)
   777  }
   778  func (d *PallocData) PallocBits() *PallocBits {
   779  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   780  }
   781  func (d *PallocData) Scavenged() *PallocBits {
   782  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   783  }
   784  
   785  // Expose fillAligned for testing.
   786  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   787  
   788  // Expose pageCache for testing.
   789  type PageCache pageCache
   790  
   791  const PageCachePages = pageCachePages
   792  
   793  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   794  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   795  }
   796  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   797  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   798  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   799  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   800  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   801  	return (*pageCache)(c).alloc(npages)
   802  }
   803  func (c *PageCache) Flush(s *PageAlloc) {
   804  	cp := (*pageCache)(c)
   805  	sp := (*pageAlloc)(s)
   806  
   807  	systemstack(func() {
   808  		// None of the tests need any higher-level locking, so we just
   809  		// take the lock internally.
   810  		lock(sp.mheapLock)
   811  		cp.flush(sp)
   812  		unlock(sp.mheapLock)
   813  	})
   814  }
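
// examplePageCacheAlloc is a hypothetical sketch of driving the PageCache
// wrapper by hand: build a full cache at an arbitrary 64-page-aligned base
// and take one page from it.
func examplePageCacheAlloc() {
	c := NewPageCache(64*PageSize, ^uint64(0), 0) // all 64 pages free, none scavenged
	addr, scav := c.Alloc(1)
	_, _ = addr, scav
	if c.Empty() {
		throw("cache unexpectedly empty after a single page allocation")
	}
}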
   815  
   816  // Expose chunk index type.
   817  type ChunkIdx chunkIdx
   818  
   819  // Expose pageAlloc for testing. Note that because pageAlloc is
   820  // not in the heap, neither is PageAlloc.
   821  type PageAlloc pageAlloc
   822  
   823  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   824  	pp := (*pageAlloc)(p)
   825  
   826  	var addr, scav uintptr
   827  	systemstack(func() {
   828  		// None of the tests need any higher-level locking, so we just
   829  		// take the lock internally.
   830  		lock(pp.mheapLock)
   831  		addr, scav = pp.alloc(npages)
   832  		unlock(pp.mheapLock)
   833  	})
   834  	return addr, scav
   835  }
   836  func (p *PageAlloc) AllocToCache() PageCache {
   837  	pp := (*pageAlloc)(p)
   838  
   839  	var c PageCache
   840  	systemstack(func() {
   841  		// None of the tests need any higher-level locking, so we just
   842  		// take the lock internally.
   843  		lock(pp.mheapLock)
   844  		c = PageCache(pp.allocToCache())
   845  		unlock(pp.mheapLock)
   846  	})
   847  	return c
   848  }
   849  func (p *PageAlloc) Free(base, npages uintptr) {
   850  	pp := (*pageAlloc)(p)
   851  
   852  	systemstack(func() {
   853  		// None of the tests need any higher-level locking, so we just
   854  		// take the lock internally.
   855  		lock(pp.mheapLock)
   856  		pp.free(base, npages)
   857  		unlock(pp.mheapLock)
   858  	})
   859  }
   860  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   861  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   862  }
   863  func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
   864  	pp := (*pageAlloc)(p)
   865  	systemstack(func() {
   866  		r = pp.scavenge(nbytes, nil, true)
   867  	})
   868  	return
   869  }
   870  func (p *PageAlloc) InUse() []AddrRange {
   871  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   872  	for _, r := range p.inUse.ranges {
   873  		ranges = append(ranges, AddrRange{r})
   874  	}
   875  	return ranges
   876  }
   877  
   878  // Returns nil if the PallocData's L2 is missing.
   879  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   880  	ci := chunkIdx(i)
   881  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
   882  }
   883  
   884  // AddrRange is a wrapper around addrRange for testing.
   885  type AddrRange struct {
   886  	addrRange
   887  }
   888  
   889  // MakeAddrRange creates a new address range.
   890  func MakeAddrRange(base, limit uintptr) AddrRange {
   891  	return AddrRange{makeAddrRange(base, limit)}
   892  }
   893  
   894  // Base returns the virtual base address of the address range.
   895  func (a AddrRange) Base() uintptr {
   896  	return a.addrRange.base.addr()
   897  }
   898  
   899  // Limit returns the virtual address of the limit of the address range.
   900  func (a AddrRange) Limit() uintptr {
   901  	return a.addrRange.limit.addr()
   902  }
   903  
   904  // Equals returns true if the two address ranges are exactly equal.
   905  func (a AddrRange) Equals(b AddrRange) bool {
   906  	return a == b
   907  }
   908  
   909  // Size returns the size in bytes of the address range.
   910  func (a AddrRange) Size() uintptr {
   911  	return a.addrRange.size()
   912  }
   913  
   914  // testSysStat is the sysStat passed to test versions of various
   915  // runtime structures. We do actually have to keep track of this
   916  // because otherwise memstats.mappedReady won't actually line up
   917  // with other stats in the runtime during tests.
   918  var testSysStat = &memstats.other_sys
   919  
   920  // AddrRanges is a wrapper around addrRanges for testing.
   921  type AddrRanges struct {
   922  	addrRanges
   923  	mutable bool
   924  }
   925  
   926  // NewAddrRanges creates a new empty addrRanges.
   927  //
   928  // Note that this initializes addrRanges just like in the
   929  // runtime, so its memory is persistentalloc'd. Call this
   930  // function sparingly since the memory it allocates is
   931  // leaked.
   932  //
   933  // This AddrRanges is mutable, so we can test methods like
   934  // Add.
   935  func NewAddrRanges() AddrRanges {
   936  	r := addrRanges{}
   937  	r.init(testSysStat)
   938  	return AddrRanges{r, true}
   939  }
   940  
   941  // MakeAddrRanges creates a new addrRanges populated with
   942  // the ranges in a.
   943  //
   944  // The returned AddrRanges is immutable, so methods like
   945  // Add will fail.
   946  func MakeAddrRanges(a ...AddrRange) AddrRanges {
   947  	// Methods that manipulate the backing store of addrRanges.ranges should
   948  	// not be used on the result from this function (e.g. add) since they may
   949  	// trigger reallocation. That would normally be fine, except the new
   950  	// backing store won't come from the heap, but from persistentalloc, so
   951  	// we'll leak some memory implicitly.
   952  	ranges := make([]addrRange, 0, len(a))
   953  	total := uintptr(0)
   954  	for _, r := range a {
   955  		ranges = append(ranges, r.addrRange)
   956  		total += r.Size()
   957  	}
   958  	return AddrRanges{addrRanges{
   959  		ranges:     ranges,
   960  		totalBytes: total,
   961  		sysStat:    testSysStat,
   962  	}, false}
   963  }
   964  
   965  // Ranges returns a copy of the ranges described by the
   966  // addrRanges.
   967  func (a *AddrRanges) Ranges() []AddrRange {
   968  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
   969  	for _, r := range a.addrRanges.ranges {
   970  		result = append(result, AddrRange{r})
   971  	}
   972  	return result
   973  }
   974  
   975  // FindSucc returns the successor to base. See addrRanges.findSucc
   976  // for more details.
   977  func (a *AddrRanges) FindSucc(base uintptr) int {
   978  	return a.findSucc(base)
   979  }
   980  
   981  // Add adds a new AddrRange to the AddrRanges.
   982  //
   983  // The AddrRange must be mutable (i.e. created by NewAddrRanges),
   984  // otherwise this method will throw.
   985  func (a *AddrRanges) Add(r AddrRange) {
   986  	if !a.mutable {
   987  		throw("attempt to mutate immutable AddrRanges")
   988  	}
   989  	a.add(r.addrRange)
   990  }
   991  
   992  // TotalBytes returns the totalBytes field of the addrRanges.
   993  func (a *AddrRanges) TotalBytes() uintptr {
   994  	return a.addrRanges.totalBytes
   995  }
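
// exampleAddrRangesMutability is a hypothetical sketch of the mutability
// split described above: only an AddrRanges built with NewAddrRanges may
// grow via Add, while MakeAddrRanges results are read-only views. The
// ranges are arbitrary page-sized examples.
func exampleAddrRangesMutability() {
	m := NewAddrRanges()
	m.Add(MakeAddrRange(PageSize, 2*PageSize)) // allowed: m is mutable

	r := MakeAddrRanges(MakeAddrRange(0, PageSize))
	_ = r.TotalBytes() // read-only accessors are fine; r.Add would throw
}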
   996  
   997  // BitRange represents a range over a bitmap.
   998  type BitRange struct {
   999  	I, N uint // bit index and length in bits
  1000  }
  1001  
  1002  // NewPageAlloc creates a new page allocator for testing and
  1003  // initializes it with the scav and chunks maps. Each key in these maps
  1004  // represents a chunk index and each value is a series of bit ranges to
  1005  // set within each bitmap's chunk.
  1006  //
  1007  // The initialization of the pageAlloc preserves the invariant that if a
  1008  // scavenged bit is set the alloc bit is necessarily unset, so some
  1009  // of the bits described by scav may be cleared in the final bitmap if
  1010  // ranges in chunks overlap with them.
  1011  //
  1012  // scav is optional, and if nil, the scavenged bitmap will be cleared
  1013  // (as opposed to all 1s, which it usually is). Furthermore, every
  1014  // chunk index in scav must appear in chunks; ones that do not are
  1015  // ignored.
  1016  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
  1017  	p := new(pageAlloc)
  1018  
  1019  	// We've got an entry, so initialize the pageAlloc.
  1020  	p.init(new(mutex), testSysStat, true)
  1021  	lockInit(p.mheapLock, lockRankMheap)
  1022  	for i, init := range chunks {
  1023  		addr := chunkBase(chunkIdx(i))
  1024  
  1025  		// Mark the chunk's existence in the pageAlloc.
  1026  		systemstack(func() {
  1027  			lock(p.mheapLock)
  1028  			p.grow(addr, pallocChunkBytes)
  1029  			unlock(p.mheapLock)
  1030  		})
  1031  
  1032  		// Initialize the bitmap and update pageAlloc metadata.
  1033  		ci := chunkIndex(addr)
  1034  		chunk := p.chunkOf(ci)
  1035  
  1036  		// Clear all the scavenged bits, which the grow call above set.
  1037  		chunk.scavenged.clearRange(0, pallocChunkPages)
  1038  
  1039  		// Simulate the allocation and subsequent free of all pages in
  1040  		// the chunk for the scavenge index. This puts the index into the
  1041  		// state it would have if all pages within the chunk were free.
  1042  		p.scav.index.alloc(ci, pallocChunkPages)
  1043  		p.scav.index.free(ci, 0, pallocChunkPages)
  1044  
  1045  		// Apply scavenge state if applicable.
  1046  		if scav != nil {
  1047  			if scvg, ok := scav[i]; ok {
  1048  				for _, s := range scvg {
  1049  					// Ignore the case of s.N == 0. setRange doesn't handle
  1050  					// it and it's a no-op anyway.
  1051  					if s.N != 0 {
  1052  						chunk.scavenged.setRange(s.I, s.N)
  1053  					}
  1054  				}
  1055  			}
  1056  		}
  1057  
  1058  		// Apply alloc state.
  1059  		for _, s := range init {
  1060  			// Ignore the case of s.N == 0. allocRange doesn't handle
  1061  			// it and it's a no-op anyway.
  1062  			if s.N != 0 {
  1063  				chunk.allocRange(s.I, s.N)
  1064  
  1065  				// Make sure the scavenge index is updated.
  1066  				p.scav.index.alloc(ci, s.N)
  1067  			}
  1068  		}
  1069  
  1070  		// Update heap metadata for the allocRange calls above.
  1071  		systemstack(func() {
  1072  			lock(p.mheapLock)
  1073  			p.update(addr, pallocChunkPages, false, false)
  1074  			unlock(p.mheapLock)
  1075  		})
  1076  	}
  1077  
  1078  	return (*PageAlloc)(p)
  1079  }
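
// exampleNewPageAlloc is a hypothetical sketch of the chunks/scav map format
// NewPageAlloc expects: one chunk at the conventional BaseChunkIdx with its
// first 64 pages allocated and its last 64 pages marked scavenged. The
// ranges are arbitrary test values.
func exampleNewPageAlloc() {
	chunks := map[ChunkIdx][]BitRange{
		BaseChunkIdx: {{I: 0, N: 64}},
	}
	scav := map[ChunkIdx][]BitRange{
		BaseChunkIdx: {{I: PallocChunkPages - 64, N: 64}},
	}
	p := NewPageAlloc(chunks, scav)
	defer FreePageAlloc(p)

	addr, _ := p.Alloc(1) // first free page after the allocated prefix
	_ = addr
}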
  1080  
  1081  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1082  // is called the pageAlloc may no longer be used. The object itself will be
  1083  // collected by the garbage collector once it is no longer live.
  1084  func FreePageAlloc(pp *PageAlloc) {
  1085  	p := (*pageAlloc)(pp)
  1086  
  1087  	// Free all the mapped space for the summary levels.
  1088  	if pageAlloc64Bit != 0 {
  1089  		for l := 0; l < summaryLevels; l++ {
  1090  			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
  1091  		}
  1092  	} else {
  1093  		resSize := uintptr(0)
  1094  		for _, s := range p.summary {
  1095  			resSize += uintptr(cap(s)) * pallocSumBytes
  1096  		}
  1097  		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
  1098  	}
  1099  
  1100  	// Free extra data structures.
  1101  	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
  1102  
  1103  	// Subtract back out whatever we mapped for the summaries.
  1104  	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
  1105  	// (and in anger should actually be accounted for), and there's no other
  1106  	// way to figure out how much we actually mapped.
  1107  	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
  1108  	testSysStat.add(-int64(p.summaryMappedReady))
  1109  
  1110  	// Free the mapped space for chunks.
  1111  	for i := range p.chunks {
  1112  		if x := p.chunks[i]; x != nil {
  1113  			p.chunks[i] = nil
  1114  			// This memory comes from sysAlloc and will always be page-aligned.
  1115  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
  1116  		}
  1117  	}
  1118  }
  1119  
  1120  // BaseChunkIdx is a convenient chunkIdx value which works on both
  1121  // 64-bit and 32-bit platforms, allowing the tests to share code
  1122  // between the two.
  1123  //
  1124  // This should not be higher than 0x100*pallocChunkBytes to support
  1125  // mips and mipsle, which only have 31-bit address spaces.
  1126  var BaseChunkIdx = func() ChunkIdx {
  1127  	var prefix uintptr
  1128  	if pageAlloc64Bit != 0 {
  1129  		prefix = 0xc000
  1130  	} else {
  1131  		prefix = 0x100
  1132  	}
  1133  	baseAddr := prefix * pallocChunkBytes
  1134  	if goos.IsAix != 0 {
  1135  		baseAddr += arenaBaseOffset
  1136  	}
  1137  	return ChunkIdx(chunkIndex(baseAddr))
  1138  }()
  1139  
  1140  // PageBase returns an address given a chunk index and a page index
  1141  // relative to that chunk.
  1142  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1143  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1144  }
  1145  
  1146  type BitsMismatch struct {
  1147  	Base      uintptr
  1148  	Got, Want uint64
  1149  }
  1150  
  1151  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1152  	ok = true
  1153  
  1154  	// Run on the system stack to avoid stack growth allocation.
  1155  	systemstack(func() {
  1156  		getg().m.mallocing++
  1157  
  1158  		// Lock so that we can safely access the bitmap.
  1159  		lock(&mheap_.lock)
  1160  
  1161  	chunkLoop:
  1162  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1163  			chunk := mheap_.pages.tryChunkOf(i)
  1164  			if chunk == nil {
  1165  				continue
  1166  			}
  1167  			cb := chunkBase(i)
  1168  			for j := 0; j < pallocChunkPages/64; j++ {
  1169  				// Run over each 64-bit bitmap section and ensure
  1170  				// scavenged is being cleared properly on allocation.
  1171  				// If a used bit and scavenged bit are both set, that's
  1172  				// an error, and could indicate a larger problem, or
  1173  				// an accounting problem.
  1174  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1175  				got := chunk.scavenged[j]
  1176  				if want != got {
  1177  					ok = false
  1178  					if n >= len(mismatches) {
  1179  						break chunkLoop
  1180  					}
  1181  					mismatches[n] = BitsMismatch{
  1182  						Base: cb + uintptr(j)*64*pageSize,
  1183  						Got:  got,
  1184  						Want: want,
  1185  					}
  1186  					n++
  1187  				}
  1188  			}
  1189  		}
  1190  		unlock(&mheap_.lock)
  1191  
  1192  		getg().m.mallocing--
  1193  	})
  1194  
  1195  	if randomizeHeapBase && len(mismatches) > 0 {
  1196  		// When goexperiment.RandomizedHeapBase64 is set we use a series of
  1197  		// padding pages to generate a randomized heap base address; these pages
  1198  		// have both the alloc and scav bits set. Because of this we expect exactly
  1199  		// one arena will have mismatches, so check for that explicitly and
  1200  		// remove the mismatches if that property holds. If we see more than one
  1201  		// arena with this property, that is an indication something has
  1202  		// actually gone wrong, so return the mismatches.
  1203  		//
  1204  		// We do this, instead of ignoring the mismatches in the chunkLoop, because
  1205  		// it's not easy to determine which arena we added the padding pages to
  1206  		// programmatically, without explicitly recording the base address somewhere
  1207  		// in a global variable (which we'd rather not do as the address of that variable
  1208  		// is likely to be somewhat predictable, potentially defeating the purpose
  1209  		// of our randomization).
  1210  		affectedArenas := map[arenaIdx]bool{}
  1211  		for _, mismatch := range mismatches {
  1212  			if mismatch.Base > 0 {
  1213  				affectedArenas[arenaIndex(mismatch.Base)] = true
  1214  			}
  1215  		}
  1216  		if len(affectedArenas) == 1 {
  1217  			ok = true
  1218  			// zero the mismatches
  1219  			for i := range n {
  1220  				mismatches[i] = BitsMismatch{}
  1221  			}
  1222  		}
  1223  	}
  1224  
  1225  	return
  1226  }
  1227  
  1228  func PageCachePagesLeaked() (leaked uintptr) {
  1229  	stw := stopTheWorld(stwForTestPageCachePagesLeaked)
  1230  
  1231  	// Walk over destroyed Ps and look for unflushed caches.
  1232  	deadp := allp[len(allp):cap(allp)]
  1233  	for _, p := range deadp {
  1234  		// Since we're going past len(allp) we may see nil Ps.
  1235  		// Just ignore them.
  1236  		if p != nil {
  1237  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1238  		}
  1239  	}
  1240  
  1241  	startTheWorld(stw)
  1242  	return
  1243  }
  1244  
  1245  var ProcYield = procyield
  1246  var OSYield = osyield
  1247  
  1248  type Mutex = mutex
  1249  
  1250  var Lock = lock
  1251  var Unlock = unlock
  1252  
  1253  var MutexContended = mutexContended
  1254  
  1255  func SemRootLock(addr *uint32) *mutex {
  1256  	root := semtable.rootFor(addr)
  1257  	return &root.lock
  1258  }
  1259  
  1260  var Semacquire = semacquire
  1261  var Semrelease1 = semrelease1
  1262  
  1263  func SemNwait(addr *uint32) uint32 {
  1264  	root := semtable.rootFor(addr)
  1265  	return root.nwait.Load()
  1266  }
  1267  
  1268  const SemTableSize = semTabSize
  1269  
  1270  // SemTable is a wrapper around semTable exported for testing.
  1271  type SemTable struct {
  1272  	semTable
  1273  }
  1274  
  1275  // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
  1276  func (t *SemTable) Enqueue(addr *uint32) {
  1277  	s := acquireSudog()
  1278  	s.releasetime = 0
  1279  	s.acquiretime = 0
  1280  	s.ticket = 0
  1281  	t.semTable.rootFor(addr).queue(addr, s, false)
  1282  }
  1283  
  1284  // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
  1285  //
  1286  // Returns true if there actually was a waiter to be dequeued.
  1287  func (t *SemTable) Dequeue(addr *uint32) bool {
  1288  	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
  1289  	if s != nil {
  1290  		releaseSudog(s)
  1291  		return true
  1292  	}
  1293  	return false
  1294  }
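
// exampleSemTableRoundTrip is a hypothetical sketch of pairing Enqueue and
// Dequeue on a test-owned semaphore address: exactly one waiter goes in,
// exactly one comes out.
func exampleSemTableRoundTrip() {
	var sema uint32
	t := new(SemTable)
	t.Enqueue(&sema)
	if !t.Dequeue(&sema) {
		throw("expected a waiter to dequeue")
	}
	if t.Dequeue(&sema) {
		throw("expected the semaphore queue to be empty")
	}
}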
  1295  
  1296  // mspan wrapper for testing.
  1297  type MSpan mspan
  1298  
  1299  // Allocate an mspan for testing.
  1300  func AllocMSpan() *MSpan {
  1301  	var s *mspan
  1302  	systemstack(func() {
  1303  		lock(&mheap_.lock)
  1304  		s = (*mspan)(mheap_.spanalloc.alloc())
  1305  		s.init(0, 0)
  1306  		unlock(&mheap_.lock)
  1307  	})
  1308  	return (*MSpan)(s)
  1309  }
  1310  
  1311  // Free an allocated mspan.
  1312  func FreeMSpan(s *MSpan) {
  1313  	systemstack(func() {
  1314  		lock(&mheap_.lock)
  1315  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1316  		unlock(&mheap_.lock)
  1317  	})
  1318  }
  1319  
  1320  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1321  	s := (*mspan)(ms)
  1322  	s.nelems = uint16(len(bits) * 8)
  1323  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1324  	result := s.countAlloc()
  1325  	s.gcmarkBits = nil
  1326  	return result
  1327  }
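
// exampleMSpanCountAlloc is a hypothetical sketch of the MSpanCountAlloc
// pattern: allocate a test span, hand it a fabricated mark bitmap, count
// the set bits, and free the span. The bitmap contents are arbitrary.
func exampleMSpanCountAlloc() int {
	s := AllocMSpan()
	defer FreeMSpan(s)
	bits := make([]byte, 2) // describes 16 object slots
	bits[0] = 0b10101010    // 4 objects marked allocated
	return MSpanCountAlloc(s, bits)
}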
  1328  
  1329  const (
  1330  	TimeHistSubBucketBits = timeHistSubBucketBits
  1331  	TimeHistNumSubBuckets = timeHistNumSubBuckets
  1332  	TimeHistNumBuckets    = timeHistNumBuckets
  1333  	TimeHistMinBucketBits = timeHistMinBucketBits
  1334  	TimeHistMaxBucketBits = timeHistMaxBucketBits
  1335  )
  1336  
  1337  type TimeHistogram timeHistogram
  1338  
  1339  // Count returns the count for the given bucket and subBucket indices.
  1340  // It returns true if the bucket was valid; otherwise it returns the count
  1341  // for the overflow bucket (if bucket >= 0) or the underflow bucket (if
  1342  // bucket < 0), and false.
  1343  func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
  1344  	t := (*timeHistogram)(th)
  1345  	if bucket < 0 {
  1346  		return t.underflow.Load(), false
  1347  	}
  1348  	i := bucket*TimeHistNumSubBuckets + subBucket
  1349  	if i >= len(t.counts) {
  1350  		return t.overflow.Load(), false
  1351  	}
  1352  	return t.counts[i].Load(), true
  1353  }
  1354  
  1355  func (th *TimeHistogram) Record(duration int64) {
  1356  	(*timeHistogram)(th).record(duration)
  1357  }
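
// exampleTimeHistogram is a hypothetical sketch of recording into and
// reading back from the histogram wrapper, including the underflow
// convention documented on Count. The durations are arbitrary.
func exampleTimeHistogram() {
	var h TimeHistogram
	h.Record(100) // a 100ns sample lands in a regular bucket
	h.Record(-1)  // negative durations land in the underflow bucket
	if under, ok := h.Count(-1, 0); ok || under != 1 {
		throw("unexpected underflow count")
	}
}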
  1358  
  1359  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
  1360  
  1361  func SetIntArgRegs(a int) int {
  1362  	lock(&finlock)
  1363  	old := intArgRegs
  1364  	if a >= 0 {
  1365  		intArgRegs = a
  1366  	}
  1367  	unlock(&finlock)
  1368  	return old
  1369  }
  1370  
  1371  func FinalizerGAsleep() bool {
  1372  	return fingStatus.Load()&fingWait != 0
  1373  }
  1374  
  1375  // For GCTestMoveStackOnNextCall, it's important not to introduce an
  1376  // extra layer of call, since then there's a return before the "real"
  1377  // next call.
  1378  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
  1379  
  1380  // For GCTestIsReachable, it's important that we do this as a call so
  1381  // escape analysis can see through it.
  1382  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  1383  	return gcTestIsReachable(ptrs...)
  1384  }
  1385  
  1386  // For GCTestPointerClass, it's important that we do this as a call so
  1387  // escape analysis can see through it.
  1388  //
  1389  // This is nosplit because gcTestPointerClass is.
  1390  //
  1391  //go:nosplit
  1392  func GCTestPointerClass(p unsafe.Pointer) string {
  1393  	return gcTestPointerClass(p)
  1394  }
  1395  
  1396  const Raceenabled = raceenabled
  1397  
  1398  const (
  1399  	GCBackgroundUtilization            = gcBackgroundUtilization
  1400  	GCGoalUtilization                  = gcGoalUtilization
  1401  	DefaultHeapMinimum                 = defaultHeapMinimum
  1402  	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
  1403  	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
  1404  )
  1405  
  1406  type GCController struct {
  1407  	gcControllerState
  1408  }
  1409  
  1410  func NewGCController(gcPercent int, memoryLimit int64) *GCController {
  1411  	// Force the controller to escape. We're going to
  1412  	// do 64-bit atomics on it, and if it gets stack-allocated
  1413  	// on a 32-bit architecture, it may get allocated unaligned
  1414  	// space.
  1415  	g := Escape(new(GCController))
  1416  	g.gcControllerState.test = true // Mark it as a test copy.
  1417  	g.init(int32(gcPercent), memoryLimit)
  1418  	return g
  1419  }
  1420  
  1421  func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
  1422  	trigger, _ := c.trigger()
  1423  	if c.heapMarked > trigger {
  1424  		trigger = c.heapMarked
  1425  	}
  1426  	c.maxStackScan.Store(stackSize)
  1427  	c.globalsScan.Store(globalsSize)
  1428  	c.heapLive.Store(trigger)
  1429  	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
  1430  	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
  1431  }
  1432  
  1433  func (c *GCController) AssistWorkPerByte() float64 {
  1434  	return c.assistWorkPerByte.Load()
  1435  }
  1436  
  1437  func (c *GCController) HeapGoal() uint64 {
  1438  	return c.heapGoal()
  1439  }
  1440  
  1441  func (c *GCController) HeapLive() uint64 {
  1442  	return c.heapLive.Load()
  1443  }
  1444  
  1445  func (c *GCController) HeapMarked() uint64 {
  1446  	return c.heapMarked
  1447  }
  1448  
  1449  func (c *GCController) Triggered() uint64 {
  1450  	return c.triggered
  1451  }
  1452  
  1453  type GCControllerReviseDelta struct {
  1454  	HeapLive        int64
  1455  	HeapScan        int64
  1456  	HeapScanWork    int64
  1457  	StackScanWork   int64
  1458  	GlobalsScanWork int64
  1459  }
  1460  
  1461  func (c *GCController) Revise(d GCControllerReviseDelta) {
  1462  	c.heapLive.Add(d.HeapLive)
  1463  	c.heapScan.Add(d.HeapScan)
  1464  	c.heapScanWork.Add(d.HeapScanWork)
  1465  	c.stackScanWork.Add(d.StackScanWork)
  1466  	c.globalsScanWork.Add(d.GlobalsScanWork)
  1467  	c.revise()
  1468  }
  1469  
  1470  func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
  1471  	c.assistTime.Store(assistTime)
  1472  	c.endCycle(elapsed, gomaxprocs, false)
  1473  	c.resetLive(bytesMarked)
  1474  	c.commit(false)
  1475  }
  1476  
  1477  func (c *GCController) AddIdleMarkWorker() bool {
  1478  	return c.addIdleMarkWorker()
  1479  }
  1480  
  1481  func (c *GCController) NeedIdleMarkWorker() bool {
  1482  	return c.needIdleMarkWorker()
  1483  }
  1484  
  1485  func (c *GCController) RemoveIdleMarkWorker() {
  1486  	c.removeIdleMarkWorker()
  1487  }
  1488  
  1489  func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
  1490  	c.setMaxIdleMarkWorkers(max)
  1491  }
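
// exampleGCControllerCycle is a hypothetical sketch of driving the exported
// pacer harness through one cycle: construct a test controller, start a
// cycle, feed it a revise delta, and end the cycle. Every numeric input
// below is an arbitrary test value.
func exampleGCControllerCycle() {
	c := NewGCController(100, 1<<62) // GOGC=100, effectively no memory limit
	c.StartCycle(1<<20, 1<<20, 0.5, 4)
	c.Revise(GCControllerReviseDelta{
		HeapLive:     1 << 20,
		HeapScan:     1 << 19,
		HeapScanWork: 1 << 19,
	})
	c.EndCycle(1<<21, 0, 1e9, 4)
	_ = c.HeapGoal()
}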
  1492  
  1493  var alwaysFalse bool
  1494  var escapeSink any
  1495  
  1496  func Escape[T any](x T) T {
  1497  	if alwaysFalse {
  1498  		escapeSink = x
  1499  	}
  1500  	return x
  1501  }
  1502  
  1503  // Acquirem blocks preemption.
  1504  func Acquirem() {
  1505  	acquirem()
  1506  }
  1507  
  1508  func Releasem() {
  1509  	releasem(getg().m)
  1510  }
  1511  
  1512  // GoschedIfBusy is an explicit preemption check to call back
  1513  // into the scheduler. This is useful for tests that run code
  1514  // which spends most of its time non-preemptible, as it
  1515  // can be placed right after becoming preemptible again to ensure
  1516  // that the scheduler gets a chance to preempt the goroutine.
  1517  func GoschedIfBusy() {
  1518  	goschedIfBusy()
  1519  }
  1520  
  1521  type PIController struct {
  1522  	piController
  1523  }
  1524  
  1525  func NewPIController(kp, ti, tt, min, max float64) *PIController {
  1526  	return &PIController{piController{
  1527  		kp:  kp,
  1528  		ti:  ti,
  1529  		tt:  tt,
  1530  		min: min,
  1531  		max: max,
  1532  	}}
  1533  }
  1534  
  1535  func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
  1536  	return c.piController.next(input, setpoint, period)
  1537  }
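
// examplePIControllerStep is a hypothetical sketch of stepping the exported
// PI controller once. The gains, bounds, and inputs are made-up values for
// illustration only.
func examplePIControllerStep() (float64, bool) {
	c := NewPIController(2.0, 1.0, 1000.0, 0.0, 100.0) // kp, ti, tt, min, max
	return c.Next(0.5, 1.0, 1.0)                       // input, setpoint, period
}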
  1538  
  1539  const (
  1540  	CapacityPerProc          = capacityPerProc
  1541  	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
  1542  )
  1543  
  1544  type GCCPULimiter struct {
  1545  	limiter gcCPULimiterState
  1546  }
  1547  
  1548  func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
  1549  	// Force the controller to escape. We're going to
  1550  	// do 64-bit atomics on it, and if it gets stack-allocated
  1551  	// on a 32-bit architecture, it may get allocated unaligned
  1552  	// space.
  1553  	l := Escape(new(GCCPULimiter))
  1554  	l.limiter.test = true
  1555  	l.limiter.resetCapacity(now, gomaxprocs)
  1556  	return l
  1557  }
  1558  
  1559  func (l *GCCPULimiter) Fill() uint64 {
  1560  	return l.limiter.bucket.fill
  1561  }
  1562  
  1563  func (l *GCCPULimiter) Capacity() uint64 {
  1564  	return l.limiter.bucket.capacity
  1565  }
  1566  
  1567  func (l *GCCPULimiter) Overflow() uint64 {
  1568  	return l.limiter.overflow
  1569  }
  1570  
  1571  func (l *GCCPULimiter) Limiting() bool {
  1572  	return l.limiter.limiting()
  1573  }
  1574  
  1575  func (l *GCCPULimiter) NeedUpdate(now int64) bool {
  1576  	return l.limiter.needUpdate(now)
  1577  }
  1578  
  1579  func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
  1580  	l.limiter.startGCTransition(enableGC, now)
  1581  }
  1582  
  1583  func (l *GCCPULimiter) FinishGCTransition(now int64) {
  1584  	l.limiter.finishGCTransition(now)
  1585  }
  1586  
  1587  func (l *GCCPULimiter) Update(now int64) {
  1588  	l.limiter.update(now)
  1589  }
  1590  
  1591  func (l *GCCPULimiter) AddAssistTime(t int64) {
  1592  	l.limiter.addAssistTime(t)
  1593  }
  1594  
  1595  func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
  1596  	l.limiter.resetCapacity(now, nprocs)
  1597  }
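
// exampleGCCPULimiterTransition is a hypothetical sketch of walking the
// exported CPU limiter wrapper through a GC transition with fabricated
// timestamps (nanosecond values chosen arbitrarily).
func exampleGCCPULimiterTransition() bool {
	l := NewGCCPULimiter(0, 4) // now=0, gomaxprocs=4
	l.StartGCTransition(true, 0)
	l.FinishGCTransition(1e6)
	l.Update(2e6)
	return l.Limiting()
}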
  1598  
  1599  const ScavengePercent = scavengePercent
  1600  
  1601  type Scavenger struct {
  1602  	Sleep      func(int64) int64
  1603  	Scavenge   func(uintptr) (uintptr, int64)
  1604  	ShouldStop func() bool
  1605  	GoMaxProcs func() int32
  1606  
  1607  	released  atomic.Uintptr
  1608  	scavenger scavengerState
  1609  	stop      chan<- struct{}
  1610  	done      <-chan struct{}
  1611  }
  1612  
  1613  func (s *Scavenger) Start() {
  1614  	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
  1615  		panic("must populate all stubs")
  1616  	}
  1617  
  1618  	// Install hooks.
  1619  	s.scavenger.sleepStub = s.Sleep
  1620  	s.scavenger.scavenge = s.Scavenge
  1621  	s.scavenger.shouldStop = s.ShouldStop
  1622  	s.scavenger.gomaxprocs = s.GoMaxProcs
  1623  
  1624  	// Start up scavenger goroutine, and wait for it to be ready.
  1625  	stop := make(chan struct{})
  1626  	s.stop = stop
  1627  	done := make(chan struct{})
  1628  	s.done = done
  1629  	go func() {
  1630  		// This should match bgscavenge, loosely.
  1631  		s.scavenger.init()
  1632  		s.scavenger.park()
  1633  		for {
  1634  			select {
  1635  			case <-stop:
  1636  				close(done)
  1637  				return
  1638  			default:
  1639  			}
  1640  			released, workTime := s.scavenger.run()
  1641  			if released == 0 {
  1642  				s.scavenger.park()
  1643  				continue
  1644  			}
  1645  			s.released.Add(released)
  1646  			s.scavenger.sleep(workTime)
  1647  		}
  1648  	}()
  1649  	if !s.BlockUntilParked(1e9 /* 1 second */) {
  1650  		panic("timed out waiting for scavenger to get ready")
  1651  	}
  1652  }
  1653  
  1654  // BlockUntilParked blocks until the scavenger parks, or until
  1655  // timeout is exceeded. Returns true if the scavenger parked.
  1656  //
  1657  // Note that in testing, parked means something slightly different.
  1658  // In anger, the scavenger parks to sleep, too, but in testing,
  1659  // it only parks when it actually has no work to do.
  1660  func (s *Scavenger) BlockUntilParked(timeout int64) bool {
  1661  	// Just spin, waiting for it to park.
  1662  	//
  1663  	// The actual parking process is racy with respect to
  1664  	// wakeups, which is fine, but for testing we need something
  1665  	// a bit more robust.
  1666  	start := nanotime()
  1667  	for nanotime()-start < timeout {
  1668  		lock(&s.scavenger.lock)
  1669  		parked := s.scavenger.parked
  1670  		unlock(&s.scavenger.lock)
  1671  		if parked {
  1672  			return true
  1673  		}
  1674  		Gosched()
  1675  	}
  1676  	return false
  1677  }
  1678  
  1679  // Released returns how many bytes the scavenger released.
  1680  func (s *Scavenger) Released() uintptr {
  1681  	return s.released.Load()
  1682  }
  1683  
  1684  // Wake wakes up a parked scavenger to keep running.
  1685  func (s *Scavenger) Wake() {
  1686  	s.scavenger.wake()
  1687  }
  1688  
  1689  // Stop cleans up the scavenger's resources. The scavenger
  1690  // must be parked for this to work.
  1691  func (s *Scavenger) Stop() {
  1692  	lock(&s.scavenger.lock)
  1693  	parked := s.scavenger.parked
  1694  	unlock(&s.scavenger.lock)
  1695  	if !parked {
  1696  		panic("tried to clean up scavenger that is not parked")
  1697  	}
  1698  	close(s.stop)
  1699  	s.Wake()
  1700  	<-s.done
  1701  }
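
// A minimal usage sketch for the Scavenger test harness above, hedged: the
// stub bodies and the *testing.T value t are illustrative assumptions, not
// the real tests' logic. From a runtime_test-side test it might look like:
//
//	var s runtime.Scavenger
//	s.Sleep = func(ns int64) int64 { return ns } // pretend we slept exactly as asked
//	released := false
//	s.Scavenge = func(bytes uintptr) (uintptr, int64) {
//		if released {
//			return 0, 0 // nothing left; the scavenger will park again
//		}
//		released = true
//		return bytes, 10_000 // pretend we released the request in 10µs
//	}
//	s.ShouldStop = func() bool { return false }
//	s.GoMaxProcs = func() int32 { return 1 }
//
//	s.Start()
//	s.Wake() // kick the parked scavenger once
//	if !s.BlockUntilParked(1e9 /* 1 second */) {
//		t.Fatal("scavenger never parked after waking")
//	}
//	t.Logf("released %d bytes", s.Released())
//	s.Stop()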
  1702  
  1703  type ScavengeIndex struct {
  1704  	i scavengeIndex
  1705  }
  1706  
  1707  func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
  1708  	s := new(ScavengeIndex)
  1709  	// This is a bit lazy but we easily guarantee we'll be able
  1710  	// to reference all the relevant chunks. The worst-case
  1711  	// memory usage here is 512 MiB, but tests generally use
  1712  	// small offsets from BaseChunkIdx, which results in ~100s
  1713  	// of KiB in memory use.
  1714  	//
  1715  	// This may still be worth making better, at least by sharing
  1716  	// this fairly large array across calls with a sync.Pool or
  1717  	// something. Currently, when the tests are run serially,
  1718  	// it takes around 0.5s. Not all that much, but if we have
  1719  	// a lot of tests like this it could add up.
  1720  	s.i.chunks = make([]atomicScavChunkData, max)
  1721  	s.i.min.Store(uintptr(min))
  1722  	s.i.max.Store(uintptr(max))
  1723  	s.i.minHeapIdx.Store(uintptr(min))
  1724  	s.i.test = true
  1725  	return s
  1726  }
  1727  
  1728  func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
  1729  	ci, off := s.i.find(force)
  1730  	return ChunkIdx(ci), off
  1731  }
  1732  
  1733  func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
  1734  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1735  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1736  
  1737  	if sc == ec {
  1738  		// The range doesn't cross any chunk boundaries.
  1739  		s.i.alloc(sc, ei+1-si)
  1740  	} else {
  1741  		// The range crosses at least one chunk boundary.
  1742  		s.i.alloc(sc, pallocChunkPages-si)
  1743  		for c := sc + 1; c < ec; c++ {
  1744  			s.i.alloc(c, pallocChunkPages)
  1745  		}
  1746  		s.i.alloc(ec, ei+1)
  1747  	}
  1748  }
  1749  
  1750  func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
  1751  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1752  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1753  
  1754  	if sc == ec {
  1755  		// The range doesn't cross any chunk boundaries.
  1756  		s.i.free(sc, si, ei+1-si)
  1757  	} else {
  1758  		// The range crosses at least one chunk boundary.
  1759  		s.i.free(sc, si, pallocChunkPages-si)
  1760  		for c := sc + 1; c < ec; c++ {
  1761  			s.i.free(c, 0, pallocChunkPages)
  1762  		}
  1763  		s.i.free(ec, 0, ei+1)
  1764  	}
  1765  }
  1766  
  1767  func (s *ScavengeIndex) ResetSearchAddrs() {
  1768  	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
  1769  		addr, marked := a.Load()
  1770  		if marked {
  1771  			a.StoreUnmark(addr, addr)
  1772  		}
  1773  		a.Clear()
  1774  	}
  1775  	s.i.freeHWM = minOffAddr
  1776  }
  1777  
  1778  func (s *ScavengeIndex) NextGen() {
  1779  	s.i.nextGen()
  1780  }
  1781  
  1782  func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
  1783  	s.i.setEmpty(chunkIdx(ci))
  1784  }
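
// A rough usage sketch for ScavengeIndex, hedged: BaseChunkIdx, PageBase,
// PallocChunkPages, and PageSize are assumed to be the page-allocator test
// exports defined earlier in this file, and no result is asserted:
//
//	idx := runtime.NewScavengeIndex(runtime.BaseChunkIdx, runtime.BaseChunkIdx+4)
//	base := runtime.PageBase(runtime.BaseChunkIdx, 0)
//	end := base + runtime.PallocChunkPages*runtime.PageSize
//	idx.AllocRange(base, end) // mark the first chunk as allocated...
//	idx.FreeRange(base, end)  // ...and then freed again
//	idx.NextGen()             // advance to the next scavenge generation
//	ci, _ := idx.Find(true)   // force a search for scavengable memory
//	_ = ci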
  1785  
  1786  func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
  1787  	sc0 := scavChunkData{
  1788  		gen:            gen,
  1789  		inUse:          inUse,
  1790  		lastInUse:      lastInUse,
  1791  		scavChunkFlags: scavChunkFlags(flags),
  1792  	}
  1793  	scp := sc0.pack()
  1794  	sc1 := unpackScavChunkData(scp)
  1795  	return sc0 == sc1
  1796  }
  1797  
  1798  const GTrackingPeriod = gTrackingPeriod
  1799  
  1800  var ZeroBase = unsafe.Pointer(&zerobase)
  1801  
  1802  const UserArenaChunkBytes = userArenaChunkBytes
  1803  
  1804  type UserArena struct {
  1805  	arena *userArena
  1806  }
  1807  
  1808  func NewUserArena() *UserArena {
  1809  	return &UserArena{newUserArena()}
  1810  }
  1811  
  1812  func (a *UserArena) New(out *any) {
  1813  	i := efaceOf(out)
  1814  	typ := i._type
  1815  	if typ.Kind() != abi.Pointer {
  1816  		panic("new result of non-ptr type")
  1817  	}
  1818  	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
  1819  	i.data = a.arena.new(typ)
  1820  }
  1821  
  1822  func (a *UserArena) Slice(sl any, cap int) {
  1823  	a.arena.slice(sl, cap)
  1824  }
  1825  
  1826  func (a *UserArena) Free() {
  1827  	a.arena.free()
  1828  }
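
// A minimal usage sketch for the UserArena wrapper, hedged: this is one way a
// test might allocate. New takes a pointer to an `any` whose dynamic type
// must itself be a pointer type (see the check in New above):
//
//	a := runtime.NewUserArena()
//	var x any = (*[4]int64)(nil)
//	a.New(&x)       // x now holds a *[4]int64 allocated from the arena
//	p := x.(*[4]int64)
//	p[0] = 42
//	var s []byte
//	a.Slice(&s, 64) // s becomes a []byte of length and capacity 64 in the arena
//	a.Free()        // releases the arena; its chunks are set aside for reuse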
  1829  
  1830  func GlobalWaitingArenaChunks() int {
  1831  	n := 0
  1832  	systemstack(func() {
  1833  		lock(&mheap_.lock)
  1834  		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
  1835  			n++
  1836  		}
  1837  		unlock(&mheap_.lock)
  1838  	})
  1839  	return n
  1840  }
  1841  
  1842  func UserArenaClone[T any](s T) T {
  1843  	return arena_heapify(s).(T)
  1844  }
  1845  
  1846  var AlignUp = alignUp
  1847  
  1848  func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
  1849  	return blockUntilEmptyFinalizerQueue(timeout)
  1850  }
  1851  
  1852  func BlockUntilEmptyCleanupQueue(timeout int64) bool {
  1853  	return gcCleanups.blockUntilEmpty(timeout)
  1854  }
  1855  
  1856  func FrameStartLine(f *Frame) int {
  1857  	return f.startLine
  1858  }
  1859  
  1860  // PersistentAlloc allocates some memory that lives outside the Go heap.
  1861  // This memory will never be freed; use sparingly.
  1862  func PersistentAlloc(n, align uintptr) unsafe.Pointer {
  1863  	return persistentalloc(n, align, &memstats.other_sys)
  1864  }
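
// For example, a test that needs a small off-heap buffer the GC will never
// move or reclaim might do the following (the size and alignment here are
// arbitrary illustrative values):
//
//	p := runtime.PersistentAlloc(64, 8) // 64 bytes, 8-byte aligned, never freed
//	buf := (*[64]byte)(p)
//	buf[0] = 1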
  1865  
  1866  const TagAlign = tagAlign
  1867  
  1868  // FPCallers works like Callers and uses frame pointer unwinding to populate
  1869  // pcBuf with the return addresses of the physical frames on the stack.
  1870  func FPCallers(pcBuf []uintptr) int {
  1871  	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
  1872  }
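
// A small usage sketch, hedged: frame-pointer unwinding is only meaningful
// when frame pointers are enabled, so a test would typically guard on
// FramePointerEnabled (declared just below):
//
//	if runtime.FramePointerEnabled {
//		pcs := make([]uintptr, 32)
//		n := runtime.FPCallers(pcs)
//		_ = pcs[:n] // return addresses of the physical frames on the stack
//	}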
  1873  
  1874  const FramePointerEnabled = framepointer_enabled
  1875  
  1876  var (
  1877  	IsPinned      = isPinned
  1878  	GetPinCounter = pinnerGetPinCounter
  1879  )
  1880  
  1881  func SetPinnerLeakPanic(f func()) {
  1882  	pinnerLeakPanic = f
  1883  }
  1884  func GetPinnerLeakPanic() func() {
  1885  	return pinnerLeakPanic
  1886  }
  1887  
  1888  var testUintptr uintptr
  1889  
  1890  func MyGenericFunc[T any]() {
  1891  	systemstack(func() {
  1892  		testUintptr = 4
  1893  	})
  1894  }
  1895  
  1896  func UnsafePoint(pc uintptr) bool {
  1897  	fi := findfunc(pc)
  1898  	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
  1899  	switch v {
  1900  	case abi.UnsafePointUnsafe:
  1901  		return true
  1902  	case abi.UnsafePointSafe:
  1903  		return false
  1904  	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
  1905  		// These are all interruptible, they just encode a nonstandard
  1906  		// way of recovering when interrupted.
  1907  		return false
  1908  	default:
  1909  		var buf [20]byte
  1910  		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
  1911  	}
  1912  }
  1913  
  1914  type TraceMap struct {
  1915  	traceMap
  1916  }
  1917  
  1918  func (m *TraceMap) PutString(s string) (uint64, bool) {
  1919  	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
  1920  }
  1921  
  1922  func (m *TraceMap) Reset() {
  1923  	m.traceMap.reset()
  1924  }
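
// A minimal usage sketch, hedged: it assumes the zero value of TraceMap is
// ready to use and that put reports whether the key was newly inserted (both
// assumptions about the unexported traceMap); t is an assumed *testing.T:
//
//	var m runtime.TraceMap
//	id1, inserted := m.PutString("hello")
//	id2, _ := m.PutString("hello")
//	if !inserted || id1 != id2 {
//		t.Fatal("the same string should map to a single ID")
//	}
//	m.Reset()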
  1925  
  1926  func SetSpinInGCMarkDone(spin bool) {
  1927  	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
  1928  }
  1929  
  1930  func GCMarkDoneRestarted() bool {
  1931  	// Only read this outside of the GC. If we're running during a GC, just report false.
  1932  	mp := acquirem()
  1933  	if gcphase != _GCoff {
  1934  		releasem(mp)
  1935  		return false
  1936  	}
  1937  	restarted := gcDebugMarkDone.restartedDueTo27993
  1938  	releasem(mp)
  1939  	return restarted
  1940  }
  1941  
  1942  func GCMarkDoneResetRestartFlag() {
  1943  	mp := acquirem()
  1944  	for gcphase != _GCoff {
  1945  		releasem(mp)
  1946  		Gosched()
  1947  		mp = acquirem()
  1948  	}
  1949  	gcDebugMarkDone.restartedDueTo27993 = false
  1950  	releasem(mp)
  1951  }
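
// A rough usage sketch, hedged: the real regression test is more involved,
// since it has to reproduce a specific mark-done race. SetSpinInGCMarkDone
// holds mark done open after the ragged barrier, so the GC must run in a
// separate goroutine and the flag must be cleared before waiting on it:
//
//	runtime.GCMarkDoneResetRestartFlag()
//	runtime.SetSpinInGCMarkDone(true)
//	done := make(chan struct{})
//	go func() {
//		runtime.GC()
//		close(done)
//	}()
//	// ... provoke the condition under test while mark done is held open ...
//	runtime.SetSpinInGCMarkDone(false)
//	<-done
//	restarted := runtime.GCMarkDoneRestarted() // read only after the GC is over
//	_ = restarted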
  1952  
  1953  type BitCursor struct {
  1954  	b bitCursor
  1955  }
  1956  
  1957  func NewBitCursor(buf *byte) BitCursor {
  1958  	return BitCursor{b: bitCursor{ptr: buf, n: 0}}
  1959  }
  1960  
  1961  func (b BitCursor) Write(data *byte, cnt uintptr) {
  1962  	b.b.write(data, cnt)
  1963  }
  1964  func (b BitCursor) Offset(cnt uintptr) BitCursor {
  1965  	return BitCursor{b: b.b.offset(cnt)}
  1966  }
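
// A small usage sketch, hedged: the values are arbitrary, and the bit order
// within a byte is whatever bitCursor uses internally:
//
//	dst := make([]byte, 4)
//	src := []byte{0b1111_0101}
//	c := runtime.NewBitCursor(&dst[0])
//	c.Offset(3).Write(&src[0], 5) // copy the first 5 bits of src into dst at bit offset 3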
  1967  
  1968  const (
  1969  	BubbleAssocUnbubbled     = bubbleAssocUnbubbled
  1970  	BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
  1971  	BubbleAssocOtherBubble   = bubbleAssocOtherBubble
  1972  )
  1973  
  1974  type TraceStackTable traceStackTable
  1975  
  1976  func (t *TraceStackTable) Reset() {
  1977  	t.tab.reset()
  1978  }
  1979  
  1980  func TraceStack(gp *G, tab *TraceStackTable) {
  1981  	traceStack(0, gp, (*traceStackTable)(tab))
  1982  }
  1983  
  1984  var X86HasAVX = &x86HasAVX
  1985  
  1986  var DebugDecorateMappings = &debug.decoratemappings
  1987  
  1988  func SetVMANameSupported() bool { return setVMANameSupported() }
  1989  
  1990  type ListHead struct {
  1991  	l listHead
  1992  }
  1993  
  1994  func (head *ListHead) Init(off uintptr) {
  1995  	head.l.init(off)
  1996  }
  1997  
  1998  type ListNode struct {
  1999  	l listNode
  2000  }
  2001  
  2002  func (head *ListHead) Push(p unsafe.Pointer) {
  2003  	head.l.push(p)
  2004  }
  2005  
  2006  func (head *ListHead) Pop() unsafe.Pointer {
  2007  	return head.l.pop()
  2008  }
  2009  
  2010  func (head *ListHead) Remove(p unsafe.Pointer) {
  2011  	head.l.remove(p)
  2012  }
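
// A rough usage sketch for the intrusive list wrappers, hedged and purely
// illustrative: the element type, its allocation, and the interpretation of
// Init's offset as the byte offset of the embedded node are assumptions about
// how a test would wire this up:
//
//	type elem struct {
//		id   int
//		node runtime.ListNode // intrusive link
//	}
//	var head runtime.ListHead
//	head.Init(unsafe.Offsetof(elem{}.node))
//	e := &elem{id: 1}
//	head.Push(unsafe.Pointer(e))
//	p := head.Pop() // expected to return unsafe.Pointer(e); nil once empty
//	_ = p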
  2013  
  2014  type ListHeadManual struct {
  2015  	l listHeadManual
  2016  }
  2017  
  2018  func (head *ListHeadManual) Init(off uintptr) {
  2019  	head.l.init(off)
  2020  }
  2021  
  2022  type ListNodeManual struct {
  2023  	l listNodeManual
  2024  }
  2025  
  2026  func (head *ListHeadManual) Push(p unsafe.Pointer) {
  2027  	head.l.push(p)
  2028  }
  2029  
  2030  func (head *ListHeadManual) Pop() unsafe.Pointer {
  2031  	return head.l.pop()
  2032  }
  2033  
  2034  func (head *ListHeadManual) Remove(p unsafe.Pointer) {
  2035  	head.l.remove(p)
  2036  }
  2037  
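// Hexdumper formats data as a hex dump starting at address base, using
// wordBytes bytes per word. It temporarily redirects the runtime's print
// output into the current g's write buffer and returns the dump as a string.
// Each data slice is fed to the dumper in order. If mark is non-nil, it is
// installed as the dumper's marker hook. A small usage sketch (hedged; the
// arguments are arbitrary):
//
//	out := runtime.Hexdumper(0x1000, 8, nil, []byte("hello, hexdump!!"))
//	_ = out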
  2038  func Hexdumper(base uintptr, wordBytes int, mark func(addr uintptr, start func()), data ...[]byte) string {
  2039  	buf := make([]byte, 0, 2048)
  2040  	getg().writebuf = buf
  2041  	h := hexdumper{addr: base, addrBytes: 4, wordBytes: uint8(wordBytes)}
  2042  	if mark != nil {
  2043  		h.mark = func(addr uintptr, m hexdumpMarker) {
  2044  			mark(addr, m.start)
  2045  		}
  2046  	}
  2047  	for _, d := range data {
  2048  		h.write(d)
  2049  	}
  2050  	h.close()
  2051  	n := len(getg().writebuf)
  2052  	getg().writebuf = nil
  2053  	if n == cap(buf) {
  2054  		panic("Hexdumper buf too small")
  2055  	}
  2056  	return string(buf[:n])
  2057  }
  2058  
  2059  func HexdumpWords(p, bytes uintptr) string {
  2060  	buf := make([]byte, 0, 2048)
  2061  	getg().writebuf = buf
  2062  	hexdumpWords(p, bytes, nil)
  2063  	n := len(getg().writebuf)
  2064  	getg().writebuf = nil
  2065  	if n == cap(buf) {
  2066  		panic("HexdumpWords buf too small")
  2067  	}
  2068  	return string(buf[:n])
  2069  }
  2070  
  2071  // DumpPrintQuoted provides access to print(quoted()) for the tests in
  2072  // runtime/print_quoted_test.go, allowing us to test that implementation.
  2073  func DumpPrintQuoted(s string) string {
  2074  	gp := getg()
  2075  	gp.writebuf = make([]byte, 0, 1<<20)
  2076  	print(quoted(s))
  2077  	buf := gp.writebuf
  2078  	gp.writebuf = nil
  2079  
  2080  	return string(buf)
  2081  }
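
// A minimal usage sketch, hedged: the exact output depends on quoted's
// formatting, so nothing is asserted here (t is an assumed *testing.T):
//
//	got := runtime.DumpPrintQuoted("a\tb\n")
//	t.Logf("quoted form: %s", got)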
  2082  
