src/runtime/export_test.go
1
2
3
4
5
6
7 package runtime
8
9 import (
10 "internal/abi"
11 "internal/goarch"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/gc"
15 "internal/runtime/sys"
16 "unsafe"
17 )
18
19 var Fadd64 = fadd64
20 var Fsub64 = fsub64
21 var Fmul64 = fmul64
22 var Fdiv64 = fdiv64
23 var F64to32 = f64to32
24 var F32to64 = f32to64
25 var Fcmp64 = fcmp64
26 var Fintto64 = fintto64
27 var F64toint = f64toint
28
29 var Entersyscall = entersyscall
30 var Exitsyscall = exitsyscall
31 var LockedOSThread = lockedOSThread
32 var Xadduintptr = atomic.Xadduintptr
33
34 var ReadRandomFailed = &readRandomFailed
35
36 var Fastlog2 = fastlog2
37
38 var ParseByteCount = parseByteCount
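// A usage sketch, not part of the original file: ParseByteCount accepts plain
// byte counts and binary-suffixed quantities (the GOMEMLIMIT syntax); the
// inputs below are illustrative assumptions.
//
//	n, ok := ParseByteCount("1KiB")
//	// expected: n == 1024 && ok
//	_, ok = ParseByteCount("not-a-size")
//	// expected: !ok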
39
40 var Nanotime = nanotime
41 var Cputicks = cputicks
42 var CyclesPerSecond = pprof_cyclesPerSecond
43 var NetpollBreak = netpollBreak
44 var Usleep = usleep
45
46 var PhysPageSize = physPageSize
47 var PhysHugePageSize = physHugePageSize
48
49 var NetpollGenericInit = netpollGenericInit
50
51 var Memmove = memmove
52 var MemclrNoHeapPointers = memclrNoHeapPointers
53
54 var CgoCheckPointer = cgoCheckPointer
55
56 const CrashStackImplemented = crashStackImplemented
57
58 const TracebackInnerFrames = tracebackInnerFrames
59 const TracebackOuterFrames = tracebackOuterFrames
60
61 var LockPartialOrder = lockPartialOrder
62
63 type TimeTimer = timeTimer
64
65 type LockRank lockRank
66
67 func (l LockRank) String() string {
68 return lockRank(l).String()
69 }
70
71 const PreemptMSupported = preemptMSupported
72
73 type LFNode struct {
74 Next uint64
75 Pushcnt uintptr
76 }
77
78 func LFStackPush(head *uint64, node *LFNode) {
79 (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
80 }
81
82 func LFStackPop(head *uint64) *LFNode {
83 return (*LFNode)((*lfstack)(head).pop())
84 }
85 func LFNodeValidate(node *LFNode) {
86 lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
87 }
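// A usage sketch, not part of the original file, for the lock-free stack
// hooks. Nodes must live outside the Go heap, hence the PersistentAlloc and
// LFNodeValidate calls; the exact flow is an illustrative assumption.
//
//	head := new(uint64)
//	n := (*LFNode)(PersistentAlloc(unsafe.Sizeof(LFNode{}), 0))
//	LFNodeValidate(n)
//	LFStackPush(head, n)
//	// expected: LFStackPop(head) == n, and a second pop returns nil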
88
89 func Netpoll(delta int64) {
90 systemstack(func() {
91 netpoll(delta)
92 })
93 }
94
95 func PointerMask(x any) (ret []byte) {
96 systemstack(func() {
97 ret = pointerMask(x)
98 })
99 return
100 }
101
102 func RunSchedLocalQueueTest() {
103 pp := new(p)
104 gs := make([]g, len(pp.runq))
105 Escape(gs)
106 for i := 0; i < len(pp.runq); i++ {
107 if g, _ := runqget(pp); g != nil {
108 throw("runq is not empty initially")
109 }
110 for j := 0; j < i; j++ {
111 runqput(pp, &gs[i], false)
112 }
113 for j := 0; j < i; j++ {
114 if g, _ := runqget(pp); g != &gs[i] {
115 print("bad element at iter ", i, "/", j, "\n")
116 throw("bad element")
117 }
118 }
119 if g, _ := runqget(pp); g != nil {
120 throw("runq is not empty afterwards")
121 }
122 }
123 }
124
125 func RunSchedLocalQueueStealTest() {
126 p1 := new(p)
127 p2 := new(p)
128 gs := make([]g, len(p1.runq))
129 Escape(gs)
130 for i := 0; i < len(p1.runq); i++ {
131 for j := 0; j < i; j++ {
132 gs[j].sig = 0
133 runqput(p1, &gs[j], false)
134 }
135 gp := runqsteal(p2, p1, true)
136 s := 0
137 if gp != nil {
138 s++
139 gp.sig++
140 }
141 for {
142 gp, _ = runqget(p2)
143 if gp == nil {
144 break
145 }
146 s++
147 gp.sig++
148 }
149 for {
150 gp, _ = runqget(p1)
151 if gp == nil {
152 break
153 }
154 gp.sig++
155 }
156 for j := 0; j < i; j++ {
157 if gs[j].sig != 1 {
158 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
159 throw("bad element")
160 }
161 }
162 if s != i/2 && s != i/2+1 {
163 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
164 throw("bad steal")
165 }
166 }
167 }
168
169 func RunSchedLocalQueueEmptyTest(iters int) {
170
171
172
173
174 done := make(chan bool, 1)
175 p := new(p)
176 gs := make([]g, 2)
177 Escape(gs)
178 ready := new(uint32)
179 for i := 0; i < iters; i++ {
180 *ready = 0
181 next0 := (i & 1) == 0
182 next1 := (i & 2) == 0
183 runqput(p, &gs[0], next0)
184 go func() {
185 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
186 }
187 if runqempty(p) {
188 println("next:", next0, next1)
189 throw("queue is empty")
190 }
191 done <- true
192 }()
193 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
194 }
195 runqput(p, &gs[1], next1)
196 runqget(p)
197 <-done
198 runqget(p)
199 }
200 }
201
202 var (
203 StringHash = stringHash
204 BytesHash = bytesHash
205 Int32Hash = int32Hash
206 Int64Hash = int64Hash
207 MemHash = memhash
208 MemHash32 = memhash32
209 MemHash64 = memhash64
210 EfaceHash = efaceHash
211 IfaceHash = ifaceHash
212 )
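// A usage sketch, not part of the original file: each exported hash hook takes
// a seed and is deterministic for a fixed seed, which the hash quality tests
// rely on. The concrete call is an illustrative assumption.
//
//	h1 := StringHash("hello", 0)
//	h2 := StringHash("hello", 0)
//	// expected: h1 == h2; changing the seed almost always changes the value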
213
214 var UseAeshash = &useAeshash
215
216 func MemclrBytes(b []byte) {
217 s := (*slice)(unsafe.Pointer(&b))
218 memclrNoHeapPointers(s.array, uintptr(s.len))
219 }
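// Illustrative sketch, not part of the original file:
//
//	b := []byte{1, 2, 3}
//	MemclrBytes(b)
//	// expected: b[0] == 0 && b[1] == 0 && b[2] == 0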
220
221 const HashLoad = hashLoad
222
223
224 func GostringW(w []uint16) (s string) {
225 systemstack(func() {
226 s = gostringw(&w[0])
227 })
228 return
229 }
230
231 var Open = open
232 var Close = closefd
233 var Read = read
234 var Write = write
235
236 func Envs() []string { return envs }
237 func SetEnvs(e []string) { envs = e }
238
239 const PtrSize = goarch.PtrSize
240
241 var ForceGCPeriod = &forcegcperiod
242
243
244
245
246 func SetTracebackEnv(level string) {
247 setTraceback(level)
248 traceback_env = traceback_cache
249 }
250
251 var ReadUnaligned32 = readUnaligned32
252 var ReadUnaligned64 = readUnaligned64
253
254 func CountPagesInUse() (pagesInUse, counted uintptr) {
255 stw := stopTheWorld(stwForTestCountPagesInUse)
256
257 pagesInUse = mheap_.pagesInUse.Load()
258
259 for _, s := range mheap_.allspans {
260 if s.state.get() == mSpanInUse {
261 counted += s.npages
262 }
263 }
264
265 startTheWorld(stw)
266
267 return
268 }
269
270 func Fastrand() uint32 { return uint32(rand()) }
271 func Fastrand64() uint64 { return rand() }
272 func Fastrandn(n uint32) uint32 { return randn(n) }
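// Illustrative sketch, not part of the original file: Fastrandn returns a
// pseudo-random number below n.
//
//	r := Fastrandn(10)
//	// expected: r < 10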
273
274 type ProfBuf profBuf
275
276 func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
277 return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
278 }
279
280 func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
281 (*profBuf)(p).write(tag, now, hdr, stk)
282 }
283
284 const (
285 ProfBufBlocking = profBufBlocking
286 ProfBufNonBlocking = profBufNonBlocking
287 )
288
289 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
290 return (*profBuf)(p).read(mode)
291 }
292
293 func (p *ProfBuf) Close() {
294 (*profBuf)(p).close()
295 }
296
297 type CPUStats = cpuStats
298
299 func ReadCPUStats() CPUStats {
300 return work.cpuStats
301 }
302
303 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
304 stw := stopTheWorld(stwForTestReadMetricsSlow)
305
306
307
308 metricsLock()
309 initMetrics()
310
311 systemstack(func() {
312
313
314 getg().racectx = getg().m.curg.racectx
315
316
317
318
319
320
321 readMetricsLocked(samplesp, len, cap)
322
323
324
325
326
327 readmemstats_m(memStats)
328
329
330
331
332 readMetricsLocked(samplesp, len, cap)
333
334
335 getg().racectx = 0
336 })
337 metricsUnlock()
338
339 startTheWorld(stw)
340 }
341
342 var DoubleCheckReadMemStats = &doubleCheckReadMemStats
343
344
345
346 func ReadMemStatsSlow() (base, slow MemStats) {
347 stw := stopTheWorld(stwForTestReadMemStatsSlow)
348
349
350 systemstack(func() {
351
352 getg().m.mallocing++
353
354 readmemstats_m(&base)
355
356
357
358 slow = base
359 slow.Alloc = 0
360 slow.TotalAlloc = 0
361 slow.Mallocs = 0
362 slow.Frees = 0
363 slow.HeapReleased = 0
364 var bySize [gc.NumSizeClasses]struct {
365 Mallocs, Frees uint64
366 }
367
368
369 for _, s := range mheap_.allspans {
370 if s.state.get() != mSpanInUse {
371 continue
372 }
373 if s.isUnusedUserArenaChunk() {
374 continue
375 }
376 if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
377 slow.Mallocs++
378 slow.Alloc += uint64(s.elemsize)
379 } else {
380 slow.Mallocs += uint64(s.allocCount)
381 slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
382 bySize[sizeclass].Mallocs += uint64(s.allocCount)
383 }
384 }
385
386
387 var m heapStatsDelta
388 memstats.heapStats.unsafeRead(&m)
389
390
391 var smallFree uint64
392 for i := 0; i < gc.NumSizeClasses; i++ {
393 slow.Frees += m.smallFreeCount[i]
394 bySize[i].Frees += m.smallFreeCount[i]
395 bySize[i].Mallocs += m.smallFreeCount[i]
396 smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
397 }
398 slow.Frees += m.tinyAllocCount + m.largeFreeCount
399 slow.Mallocs += slow.Frees
400
401 slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
402
403 for i := range slow.BySize {
404 slow.BySize[i].Mallocs = bySize[i].Mallocs
405 slow.BySize[i].Frees = bySize[i].Frees
406 }
407
408 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
409 chunk := mheap_.pages.tryChunkOf(i)
410 if chunk == nil {
411 continue
412 }
413 pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
414 slow.HeapReleased += uint64(pg) * pageSize
415 }
416 for _, p := range allp {
417
418 pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
419 slow.HeapReleased += uint64(pg) * pageSize
420 }
421
422 getg().m.mallocing--
423 })
424
425 startTheWorld(stw)
426 return
427 }
428
429
430
431
432 func ShrinkStackAndVerifyFramePointers() {
433 before := stackPoisonCopy
434 defer func() { stackPoisonCopy = before }()
435 stackPoisonCopy = 1
436
437 gp := getg()
438 systemstack(func() {
439 shrinkstack(gp)
440 })
441
442
443 FPCallers(make([]uintptr, 1024))
444 }
445
446
447
448
449 func BlockOnSystemStack() {
450 systemstack(blockOnSystemStackInternal)
451 }
452
453 func blockOnSystemStackInternal() {
454 print("x\n")
455 lock(&deadlock)
456 lock(&deadlock)
457 }
458
459 type RWMutex struct {
460 rw rwmutex
461 }
462
463 func (rw *RWMutex) Init() {
464 rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
465 }
466
467 func (rw *RWMutex) RLock() {
468 rw.rw.rlock()
469 }
470
471 func (rw *RWMutex) RUnlock() {
472 rw.rw.runlock()
473 }
474
475 func (rw *RWMutex) Lock() {
476 rw.rw.lock()
477 }
478
479 func (rw *RWMutex) Unlock() {
480 rw.rw.unlock()
481 }
482
483 func LockOSCounts() (external, internal uint32) {
484 gp := getg()
485 if gp.m.lockedExt+gp.m.lockedInt == 0 {
486 if gp.lockedm != 0 {
487 panic("lockedm on non-locked goroutine")
488 }
489 } else {
490 if gp.lockedm == 0 {
491 panic("nil lockedm on locked goroutine")
492 }
493 }
494 return gp.m.lockedExt, gp.m.lockedInt
495 }
496
497
498 func TracebackSystemstack(stk []uintptr, i int) int {
499 if i == 0 {
500 pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
501 var u unwinder
502 u.initAt(pc, sp, 0, getg(), unwindJumpStack)
503 return tracebackPCs(&u, 0, stk)
504 }
505 n := 0
506 systemstack(func() {
507 n = TracebackSystemstack(stk, i-1)
508 })
509 return n
510 }
511
512 func KeepNArenaHints(n int) {
513 hint := mheap_.arenaHints
514 for i := 1; i < n; i++ {
515 hint = hint.next
516 if hint == nil {
517 return
518 }
519 }
520 hint.next = nil
521 }
522
523
524
525
526
527
528
529 func MapNextArenaHint() (start, end uintptr, ok bool) {
530 hint := mheap_.arenaHints
531 addr := hint.addr
532 if hint.down {
533 start, end = addr-heapArenaBytes, addr
534 addr -= physPageSize
535 } else {
536 start, end = addr, addr+heapArenaBytes
537 }
538 got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
539 ok = (addr == uintptr(got))
540 if !ok {
541
542
543 sysFreeOS(got, physPageSize)
544 }
545 return
546 }
547
548 func GetNextArenaHint() uintptr {
549 return mheap_.arenaHints.addr
550 }
551
552 type G = g
553
554 type Sudog = sudog
555
556 type XRegPerG = xRegPerG
557
558 func Getg() *G {
559 return getg()
560 }
561
562 func Goid() uint64 {
563 return getg().goid
564 }
565
566 func GIsWaitingOnMutex(gp *G) bool {
567 return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
568 }
569
570 var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
571
572
573 func PanicForTesting(b []byte, i int) byte {
574 return unexportedPanicForTesting(b, i)
575 }
576
577
578 func unexportedPanicForTesting(b []byte, i int) byte {
579 return b[i]
580 }
581
582 func G0StackOverflow() {
583 systemstack(func() {
584 g0 := getg()
585 sp := sys.GetCallerSP()
586
587
588
589 g0.stack.lo = sp - 4096 - stackSystem
590 g0.stackguard0 = g0.stack.lo + stackGuard
591 g0.stackguard1 = g0.stackguard0
592
593 stackOverflow(nil)
594 })
595 }
596
597 func stackOverflow(x *byte) {
598 var buf [256]byte
599 stackOverflow(&buf[0])
600 }
601
602 func RunGetgThreadSwitchTest() {
603
604
605
606
607
608
609 ch := make(chan int)
610 go func(ch chan int) {
611 ch <- 5
612 LockOSThread()
613 }(ch)
614
615 g1 := getg()
616
617
618
619
620
621 <-ch
622
623 g2 := getg()
624 if g1 != g2 {
625 panic("g1 != g2")
626 }
627
628
629
630 g3 := getg()
631 if g1 != g3 {
632 panic("g1 != g3")
633 }
634 }
635
636 const (
637 PageSize = pageSize
638 PallocChunkPages = pallocChunkPages
639 PageAlloc64Bit = pageAlloc64Bit
640 PallocSumBytes = pallocSumBytes
641 )
642
643
644 type PallocSum pallocSum
645
646 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
647 func (m PallocSum) Start() uint { return pallocSum(m).start() }
648 func (m PallocSum) Max() uint { return pallocSum(m).max() }
649 func (m PallocSum) End() uint { return pallocSum(m).end() }
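// A small sketch, not part of the original file: packing stores the three
// summary fields in a single word, so a pack/unpack round trip preserves them
// (the values are illustrative).
//
//	s := PackPallocSum(1, 2, 3)
//	// expected: s.Start() == 1, s.Max() == 2, s.End() == 3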
650
651
652 type PallocBits pallocBits
653
654 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
655 return (*pallocBits)(b).find(npages, searchIdx)
656 }
657 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
658 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
659 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
660 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
661
662
663
664 func SummarizeSlow(b *PallocBits) PallocSum {
665 var start, most, end uint
666
667 const N = uint(len(b)) * 64
668 for start < N && (*pageBits)(b).get(start) == 0 {
669 start++
670 }
671 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
672 end++
673 }
674 run := uint(0)
675 for i := uint(0); i < N; i++ {
676 if (*pageBits)(b).get(i) == 0 {
677 run++
678 } else {
679 run = 0
680 }
681 most = max(most, run)
682 }
683 return PackPallocSum(start, most, end)
684 }
685
686
687 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
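// Illustrative sketch, not part of the original file: FindBitRange64 reports
// the bit index of the first run of n consecutive 1 bits, or a value >= 64 if
// no such run exists. The inputs are assumptions.
//
//	i := FindBitRange64(0b0110, 2)
//	// expected: i == 1
//	j := FindBitRange64(0b0110, 3)
//	// expected: j >= 64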
688
689
690
691 func DiffPallocBits(a, b *PallocBits) []BitRange {
692 ba := (*pageBits)(a)
693 bb := (*pageBits)(b)
694
695 var d []BitRange
696 base, size := uint(0), uint(0)
697 for i := uint(0); i < uint(len(ba))*64; i++ {
698 if ba.get(i) != bb.get(i) {
699 if size == 0 {
700 base = i
701 }
702 size++
703 } else {
704 if size != 0 {
705 d = append(d, BitRange{base, size})
706 }
707 size = 0
708 }
709 }
710 if size != 0 {
711 d = append(d, BitRange{base, size})
712 }
713 return d
714 }
715
716
717
718
719 func StringifyPallocBits(b *PallocBits, r BitRange) string {
720 str := ""
721 for j := r.I; j < r.I+r.N; j++ {
722 if (*pageBits)(b).get(j) != 0 {
723 str += "1"
724 } else {
725 str += "0"
726 }
727 }
728 return str
729 }
730
731
732 type PallocData pallocData
733
734 func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
735 return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
736 }
737 func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
738 func (d *PallocData) ScavengedSetRange(i, n uint) {
739 (*pallocData)(d).scavenged.setRange(i, n)
740 }
741 func (d *PallocData) PallocBits() *PallocBits {
742 return (*PallocBits)(&(*pallocData)(d).pallocBits)
743 }
744 func (d *PallocData) Scavenged() *PallocBits {
745 return (*PallocBits)(&(*pallocData)(d).scavenged)
746 }
747
748
749 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
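// Illustrative sketch, not part of the original file, of fillAligned's
// behavior: every m-bit aligned group containing a set bit becomes all ones.
// The constants are assumptions.
//
//	v := FillAligned(0x0100a3, 8)
//	// expected: v == 0xff00ff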
750
751
752 type PageCache pageCache
753
754 const PageCachePages = pageCachePages
755
756 func NewPageCache(base uintptr, cache, scav uint64) PageCache {
757 return PageCache(pageCache{base: base, cache: cache, scav: scav})
758 }
759 func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
760 func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
761 func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
762 func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
763 func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
764 return (*pageCache)(c).alloc(npages)
765 }
766 func (c *PageCache) Flush(s *PageAlloc) {
767 cp := (*pageCache)(c)
768 sp := (*pageAlloc)(s)
769
770 systemstack(func() {
771
772
773 lock(sp.mheapLock)
774 cp.flush(sp)
775 unlock(sp.mheapLock)
776 })
777 }
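// A usage sketch, not part of the original file: a PageCache built with a full
// cache mask hands out its first page. The base address and masks are
// illustrative assumptions.
//
//	base := PageBase(BaseChunkIdx, 0)
//	c := NewPageCache(base, ^uint64(0), 0)
//	addr, scav := c.Alloc(1)
//	// expected: addr == base and scav == 0 (no page marked scavenged)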
778
779
780 type ChunkIdx chunkIdx
781
782
783
784 type PageAlloc pageAlloc
785
786 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
787 pp := (*pageAlloc)(p)
788
789 var addr, scav uintptr
790 systemstack(func() {
791
792
793 lock(pp.mheapLock)
794 addr, scav = pp.alloc(npages)
795 unlock(pp.mheapLock)
796 })
797 return addr, scav
798 }
799 func (p *PageAlloc) AllocToCache() PageCache {
800 pp := (*pageAlloc)(p)
801
802 var c PageCache
803 systemstack(func() {
804
805
806 lock(pp.mheapLock)
807 c = PageCache(pp.allocToCache())
808 unlock(pp.mheapLock)
809 })
810 return c
811 }
812 func (p *PageAlloc) Free(base, npages uintptr) {
813 pp := (*pageAlloc)(p)
814
815 systemstack(func() {
816
817
818 lock(pp.mheapLock)
819 pp.free(base, npages)
820 unlock(pp.mheapLock)
821 })
822 }
823 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
824 return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
825 }
826 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
827 pp := (*pageAlloc)(p)
828 systemstack(func() {
829 r = pp.scavenge(nbytes, nil, true)
830 })
831 return
832 }
833 func (p *PageAlloc) InUse() []AddrRange {
834 ranges := make([]AddrRange, 0, len(p.inUse.ranges))
835 for _, r := range p.inUse.ranges {
836 ranges = append(ranges, AddrRange{r})
837 }
838 return ranges
839 }
840
841
842 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
843 ci := chunkIdx(i)
844 return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
845 }
846
847
848 type AddrRange struct {
849 addrRange
850 }
851
852
853 func MakeAddrRange(base, limit uintptr) AddrRange {
854 return AddrRange{makeAddrRange(base, limit)}
855 }
856
857
858 func (a AddrRange) Base() uintptr {
859 return a.addrRange.base.addr()
860 }
861
862
863 func (a AddrRange) Limit() uintptr {
864 return a.addrRange.limit.addr()
865 }
866
867
868 func (a AddrRange) Equals(b AddrRange) bool {
869 return a == b
870 }
871
872
873 func (a AddrRange) Size() uintptr {
874 return a.addrRange.size()
875 }
876
877
878
879
880
881 var testSysStat = &memstats.other_sys
882
883
884 type AddrRanges struct {
885 addrRanges
886 mutable bool
887 }
888
889
890
891
892
893
894
895
896
897
898 func NewAddrRanges() AddrRanges {
899 r := addrRanges{}
900 r.init(testSysStat)
901 return AddrRanges{r, true}
902 }
903
904
905
906
907
908
909 func MakeAddrRanges(a ...AddrRange) AddrRanges {
910
911
912
913
914
915 ranges := make([]addrRange, 0, len(a))
916 total := uintptr(0)
917 for _, r := range a {
918 ranges = append(ranges, r.addrRange)
919 total += r.Size()
920 }
921 return AddrRanges{addrRanges{
922 ranges: ranges,
923 totalBytes: total,
924 sysStat: testSysStat,
925 }, false}
926 }
927
928
929
930 func (a *AddrRanges) Ranges() []AddrRange {
931 result := make([]AddrRange, 0, len(a.addrRanges.ranges))
932 for _, r := range a.addrRanges.ranges {
933 result = append(result, AddrRange{r})
934 }
935 return result
936 }
937
938
939
940 func (a *AddrRanges) FindSucc(base uintptr) int {
941 return a.findSucc(base)
942 }
943
944
945
946
947
948 func (a *AddrRanges) Add(r AddrRange) {
949 if !a.mutable {
950 throw("attempt to mutate immutable AddrRanges")
951 }
952 a.add(r.addrRange)
953 }
954
955
956 func (a *AddrRanges) TotalBytes() uintptr {
957 return a.addrRanges.totalBytes
958 }
959
960
961 type BitRange struct {
962 I, N uint
963 }
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979 func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
980 p := new(pageAlloc)
981
982
983 p.init(new(mutex), testSysStat, true)
984 lockInit(p.mheapLock, lockRankMheap)
985 for i, init := range chunks {
986 addr := chunkBase(chunkIdx(i))
987
988
989 systemstack(func() {
990 lock(p.mheapLock)
991 p.grow(addr, pallocChunkBytes)
992 unlock(p.mheapLock)
993 })
994
995
996 ci := chunkIndex(addr)
997 chunk := p.chunkOf(ci)
998
999
1000 chunk.scavenged.clearRange(0, pallocChunkPages)
1001
1002
1003
1004
1005 p.scav.index.alloc(ci, pallocChunkPages)
1006 p.scav.index.free(ci, 0, pallocChunkPages)
1007
1008
1009 if scav != nil {
1010 if scvg, ok := scav[i]; ok {
1011 for _, s := range scvg {
1012
1013
1014 if s.N != 0 {
1015 chunk.scavenged.setRange(s.I, s.N)
1016 }
1017 }
1018 }
1019 }
1020
1021
1022 for _, s := range init {
1023
1024
1025 if s.N != 0 {
1026 chunk.allocRange(s.I, s.N)
1027
1028
1029 p.scav.index.alloc(ci, s.N)
1030 }
1031 }
1032
1033
1034 systemstack(func() {
1035 lock(p.mheapLock)
1036 p.update(addr, pallocChunkPages, false, false)
1037 unlock(p.mheapLock)
1038 })
1039 }
1040
1041 return (*PageAlloc)(p)
1042 }
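// A construction sketch, not part of the original file, in the shape the page
// allocator tests use: one chunk with pages [0, 10) and page 15 allocated and
// nothing scavenged. The exact ranges are illustrative assumptions.
//
//	p := NewPageAlloc(map[ChunkIdx][]BitRange{
//		BaseChunkIdx: {{0, 10}, {15, 1}},
//	}, nil)
//	defer FreePageAlloc(p)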
1043
1044
1045
1046
1047 func FreePageAlloc(pp *PageAlloc) {
1048 p := (*pageAlloc)(pp)
1049
1050
1051 if pageAlloc64Bit != 0 {
1052 for l := 0; l < summaryLevels; l++ {
1053 sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1054 }
1055 } else {
1056 resSize := uintptr(0)
1057 for _, s := range p.summary {
1058 resSize += uintptr(cap(s)) * pallocSumBytes
1059 }
1060 sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1061 }
1062
1063
1064 sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1065
1066
1067
1068
1069
1070 gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1071 testSysStat.add(-int64(p.summaryMappedReady))
1072
1073
1074 for i := range p.chunks {
1075 if x := p.chunks[i]; x != nil {
1076 p.chunks[i] = nil
1077
1078 sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1079 }
1080 }
1081 }
1082
1083
1084
1085
1086
1087
1088
1089 var BaseChunkIdx = func() ChunkIdx {
1090 var prefix uintptr
1091 if pageAlloc64Bit != 0 {
1092 prefix = 0xc000
1093 } else {
1094 prefix = 0x100
1095 }
1096 baseAddr := prefix * pallocChunkBytes
1097 if goos.IsAix != 0 {
1098 baseAddr += arenaBaseOffset
1099 }
1100 return ChunkIdx(chunkIndex(baseAddr))
1101 }()
1102
1103
1104
1105 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1106 return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
1107 }
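// Illustrative sketch, not part of the original file: consecutive pages within
// a chunk differ by PageSize.
//
//	a0 := PageBase(BaseChunkIdx, 0)
//	a1 := PageBase(BaseChunkIdx, 1)
//	// expected: a1 == a0 + PageSize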
1108
1109 type BitsMismatch struct {
1110 Base uintptr
1111 Got, Want uint64
1112 }
1113
1114 func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1115 ok = true
1116
1117
1118 systemstack(func() {
1119 getg().m.mallocing++
1120
1121
1122 lock(&mheap_.lock)
1123
1124 chunkLoop:
1125 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1126 chunk := mheap_.pages.tryChunkOf(i)
1127 if chunk == nil {
1128 continue
1129 }
1130 cb := chunkBase(i)
1131 for j := 0; j < pallocChunkPages/64; j++ {
1132
1133
1134
1135
1136
1137 want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1138 got := chunk.scavenged[j]
1139 if want != got {
1140 ok = false
1141 if n >= len(mismatches) {
1142 break chunkLoop
1143 }
1144 mismatches[n] = BitsMismatch{
1145 Base: cb + uintptr(j)*64*pageSize,
1146 Got: got,
1147 Want: want,
1148 }
1149 n++
1150 }
1151 }
1152 }
1153 unlock(&mheap_.lock)
1154
1155 getg().m.mallocing--
1156 })
1157
1158 if randomizeHeapBase && len(mismatches) > 0 {
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173 affectedArenas := map[arenaIdx]bool{}
1174 for _, mismatch := range mismatches {
1175 if mismatch.Base > 0 {
1176 affectedArenas[arenaIndex(mismatch.Base)] = true
1177 }
1178 }
1179 if len(affectedArenas) == 1 {
1180 ok = true
1181
1182 for i := range n {
1183 mismatches[i] = BitsMismatch{}
1184 }
1185 }
1186 }
1187
1188 return
1189 }
1190
1191 func PageCachePagesLeaked() (leaked uintptr) {
1192 stw := stopTheWorld(stwForTestPageCachePagesLeaked)
1193
1194
1195 deadp := allp[len(allp):cap(allp)]
1196 for _, p := range deadp {
1197
1198
1199 if p != nil {
1200 leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1201 }
1202 }
1203
1204 startTheWorld(stw)
1205 return
1206 }
1207
1208 var ProcYield = procyield
1209 var OSYield = osyield
1210
1211 type Mutex = mutex
1212
1213 var Lock = lock
1214 var Unlock = unlock
1215
1216 var MutexContended = mutexContended
1217
1218 func SemRootLock(addr *uint32) *mutex {
1219 root := semtable.rootFor(addr)
1220 return &root.lock
1221 }
1222
1223 var Semacquire = semacquire
1224 var Semrelease1 = semrelease1
1225
1226 func SemNwait(addr *uint32) uint32 {
1227 root := semtable.rootFor(addr)
1228 return root.nwait.Load()
1229 }
1230
1231 const SemTableSize = semTabSize
1232
1233
1234 type SemTable struct {
1235 semTable
1236 }
1237
1238
1239 func (t *SemTable) Enqueue(addr *uint32) {
1240 s := acquireSudog()
1241 s.releasetime = 0
1242 s.acquiretime = 0
1243 s.ticket = 0
1244 t.semTable.rootFor(addr).queue(addr, s, false)
1245 }
1246
1247
1248
1249
1250 func (t *SemTable) Dequeue(addr *uint32) bool {
1251 s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
1252 if s != nil {
1253 releaseSudog(s)
1254 return true
1255 }
1256 return false
1257 }
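// A usage sketch, not part of the original file; the flow is an illustrative
// assumption.
//
//	t := new(SemTable)
//	var sema uint32
//	t.Enqueue(&sema)
//	// expected: t.Dequeue(&sema) == true, and a second Dequeue returns false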
1258
1259
1260 type MSpan mspan
1261
1262
1263 func AllocMSpan() *MSpan {
1264 var s *mspan
1265 systemstack(func() {
1266 lock(&mheap_.lock)
1267 s = (*mspan)(mheap_.spanalloc.alloc())
1268 s.init(0, 0)
1269 unlock(&mheap_.lock)
1270 })
1271 return (*MSpan)(s)
1272 }
1273
1274
1275 func FreeMSpan(s *MSpan) {
1276 systemstack(func() {
1277 lock(&mheap_.lock)
1278 mheap_.spanalloc.free(unsafe.Pointer(s))
1279 unlock(&mheap_.lock)
1280 })
1281 }
1282
1283 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1284 s := (*mspan)(ms)
1285 s.nelems = uint16(len(bits) * 8)
1286 s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1287 result := s.countAlloc()
1288 s.gcmarkBits = nil
1289 return result
1290 }
1291
1292 type MSpanQueue mSpanQueue
1293
1294 func (q *MSpanQueue) Size() int {
1295 return (*mSpanQueue)(q).n
1296 }
1297
1298 func (q *MSpanQueue) Push(s *MSpan) {
1299 (*mSpanQueue)(q).push((*mspan)(s))
1300 }
1301
1302 func (q *MSpanQueue) Pop() *MSpan {
1303 s := (*mSpanQueue)(q).pop()
1304 return (*MSpan)(s)
1305 }
1306
1307 func (q *MSpanQueue) TakeAll(p *MSpanQueue) {
1308 (*mSpanQueue)(q).takeAll((*mSpanQueue)(p))
1309 }
1310
1311 func (q *MSpanQueue) PopN(n int) MSpanQueue {
1312 p := (*mSpanQueue)(q).popN(n)
1313 return (MSpanQueue)(p)
1314 }
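// A usage sketch, not part of the original file; the flow is an illustrative
// assumption.
//
//	q := new(MSpanQueue)
//	s := AllocMSpan()
//	q.Push(s)
//	// expected: q.Size() == 1 and q.Pop() == s; release s with FreeMSpan(s)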
1315
1316 const (
1317 TimeHistSubBucketBits = timeHistSubBucketBits
1318 TimeHistNumSubBuckets = timeHistNumSubBuckets
1319 TimeHistNumBuckets = timeHistNumBuckets
1320 TimeHistMinBucketBits = timeHistMinBucketBits
1321 TimeHistMaxBucketBits = timeHistMaxBucketBits
1322 )
1323
1324 type TimeHistogram timeHistogram
1325
1326
1327
1328
1329
1330 func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1331 t := (*timeHistogram)(th)
1332 if bucket < 0 {
1333 return t.underflow.Load(), false
1334 }
1335 i := bucket*TimeHistNumSubBuckets + subBucket
1336 if i >= len(t.counts) {
1337 return t.overflow.Load(), false
1338 }
1339 return t.counts[i].Load(), true
1340 }
1341
1342 func (th *TimeHistogram) Record(duration int64) {
1343 (*timeHistogram)(th).record(duration)
1344 }
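// A usage sketch, not part of the original file: negative durations land in
// the underflow bucket, which Count reports with ok == false.
//
//	var h TimeHistogram
//	h.Record(-1)
//	n, ok := h.Count(-1, 0)
//	// expected: n == 1 && !ok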
1345
1346 var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1347
1348 func SetIntArgRegs(a int) int {
1349 lock(&finlock)
1350 old := intArgRegs
1351 if a >= 0 {
1352 intArgRegs = a
1353 }
1354 unlock(&finlock)
1355 return old
1356 }
1357
1358 func FinalizerGAsleep() bool {
1359 return fingStatus.Load()&fingWait != 0
1360 }
1361
1362
1363
1364
1365 var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1366
1367
1368
1369 func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1370 return gcTestIsReachable(ptrs...)
1371 }
1372
1373
1374
1375
1376
1377
1378
1379 func GCTestPointerClass(p unsafe.Pointer) string {
1380 return gcTestPointerClass(p)
1381 }
1382
1383 const Raceenabled = raceenabled
1384
1385 const (
1386 GCBackgroundUtilization = gcBackgroundUtilization
1387 GCGoalUtilization = gcGoalUtilization
1388 DefaultHeapMinimum = defaultHeapMinimum
1389 MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1390 MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom
1391 )
1392
1393 type GCController struct {
1394 gcControllerState
1395 }
1396
1397 func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1398
1399
1400
1401
1402 g := Escape(new(GCController))
1403 g.gcControllerState.test = true
1404 g.init(int32(gcPercent), memoryLimit)
1405 return g
1406 }
1407
1408 func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1409 trigger, _ := c.trigger()
1410 if c.heapMarked > trigger {
1411 trigger = c.heapMarked
1412 }
1413 c.maxStackScan.Store(stackSize)
1414 c.globalsScan.Store(globalsSize)
1415 c.heapLive.Store(trigger)
1416 c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1417 c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1418 }
1419
1420 func (c *GCController) AssistWorkPerByte() float64 {
1421 return c.assistWorkPerByte.Load()
1422 }
1423
1424 func (c *GCController) HeapGoal() uint64 {
1425 return c.heapGoal()
1426 }
1427
1428 func (c *GCController) HeapLive() uint64 {
1429 return c.heapLive.Load()
1430 }
1431
1432 func (c *GCController) HeapMarked() uint64 {
1433 return c.heapMarked
1434 }
1435
1436 func (c *GCController) Triggered() uint64 {
1437 return c.triggered
1438 }
1439
1440 type GCControllerReviseDelta struct {
1441 HeapLive int64
1442 HeapScan int64
1443 HeapScanWork int64
1444 StackScanWork int64
1445 GlobalsScanWork int64
1446 }
1447
1448 func (c *GCController) Revise(d GCControllerReviseDelta) {
1449 c.heapLive.Add(d.HeapLive)
1450 c.heapScan.Add(d.HeapScan)
1451 c.heapScanWork.Add(d.HeapScanWork)
1452 c.stackScanWork.Add(d.StackScanWork)
1453 c.globalsScanWork.Add(d.GlobalsScanWork)
1454 c.revise()
1455 }
1456
1457 func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1458 c.assistTime.Store(assistTime)
1459 c.endCycle(elapsed, gomaxprocs, false)
1460 c.resetLive(bytesMarked)
1461 c.commit(false)
1462 }
1463
1464 func (c *GCController) AddIdleMarkWorker() bool {
1465 return c.addIdleMarkWorker()
1466 }
1467
1468 func (c *GCController) NeedIdleMarkWorker() bool {
1469 return c.needIdleMarkWorker()
1470 }
1471
1472 func (c *GCController) RemoveIdleMarkWorker() {
1473 c.removeIdleMarkWorker()
1474 }
1475
1476 func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1477 c.setMaxIdleMarkWorkers(max)
1478 }
1479
1480 var alwaysFalse bool
1481 var escapeSink any
1482
1483 func Escape[T any](x T) T {
1484 if alwaysFalse {
1485 escapeSink = x
1486 }
1487 return x
1488 }
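// Because the compiler cannot prove that alwaysFalse stays false, any value
// passed to Escape is treated as escaping and is forced onto the heap. A short
// sketch, not part of the original file:
//
//	p := Escape(new(int))
//	// *p is heap-allocated even where it could otherwise live on the stack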
1489
1490
1491 func Acquirem() {
1492 acquirem()
1493 }
1494
1495 func Releasem() {
1496 releasem(getg().m)
1497 }
1498
1499 var Timediv = timediv
1500
1501 type PIController struct {
1502 piController
1503 }
1504
1505 func NewPIController(kp, ti, tt, min, max float64) *PIController {
1506 return &PIController{piController{
1507 kp: kp,
1508 ti: ti,
1509 tt: tt,
1510 min: min,
1511 max: max,
1512 }}
1513 }
1514
1515 func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1516 return c.piController.next(input, setpoint, period)
1517 }
1518
1519 const (
1520 CapacityPerProc = capacityPerProc
1521 GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1522 )
1523
1524 type GCCPULimiter struct {
1525 limiter gcCPULimiterState
1526 }
1527
1528 func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1529
1530
1531
1532
1533 l := Escape(new(GCCPULimiter))
1534 l.limiter.test = true
1535 l.limiter.resetCapacity(now, gomaxprocs)
1536 return l
1537 }
1538
1539 func (l *GCCPULimiter) Fill() uint64 {
1540 return l.limiter.bucket.fill
1541 }
1542
1543 func (l *GCCPULimiter) Capacity() uint64 {
1544 return l.limiter.bucket.capacity
1545 }
1546
1547 func (l *GCCPULimiter) Overflow() uint64 {
1548 return l.limiter.overflow
1549 }
1550
1551 func (l *GCCPULimiter) Limiting() bool {
1552 return l.limiter.limiting()
1553 }
1554
1555 func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1556 return l.limiter.needUpdate(now)
1557 }
1558
1559 func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1560 l.limiter.startGCTransition(enableGC, now)
1561 }
1562
1563 func (l *GCCPULimiter) FinishGCTransition(now int64) {
1564 l.limiter.finishGCTransition(now)
1565 }
1566
1567 func (l *GCCPULimiter) Update(now int64) {
1568 l.limiter.update(now)
1569 }
1570
1571 func (l *GCCPULimiter) AddAssistTime(t int64) {
1572 l.limiter.addAssistTime(t)
1573 }
1574
1575 func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1576 l.limiter.resetCapacity(now, nprocs)
1577 }
1578
1579 const ScavengePercent = scavengePercent
1580
1581 type Scavenger struct {
1582 Sleep func(int64) int64
1583 Scavenge func(uintptr) (uintptr, int64)
1584 ShouldStop func() bool
1585 GoMaxProcs func() int32
1586
1587 released atomic.Uintptr
1588 scavenger scavengerState
1589 stop chan<- struct{}
1590 done <-chan struct{}
1591 }
1592
1593 func (s *Scavenger) Start() {
1594 if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1595 panic("must populate all stubs")
1596 }
1597
1598
1599 s.scavenger.sleepStub = s.Sleep
1600 s.scavenger.scavenge = s.Scavenge
1601 s.scavenger.shouldStop = s.ShouldStop
1602 s.scavenger.gomaxprocs = s.GoMaxProcs
1603
1604
1605 stop := make(chan struct{})
1606 s.stop = stop
1607 done := make(chan struct{})
1608 s.done = done
1609 go func() {
1610
1611 s.scavenger.init()
1612 s.scavenger.park()
1613 for {
1614 select {
1615 case <-stop:
1616 close(done)
1617 return
1618 default:
1619 }
1620 released, workTime := s.scavenger.run()
1621 if released == 0 {
1622 s.scavenger.park()
1623 continue
1624 }
1625 s.released.Add(released)
1626 s.scavenger.sleep(workTime)
1627 }
1628 }()
1629 if !s.BlockUntilParked(1e9 /* 1 second */) {
1630 panic("timed out waiting for scavenger to get ready")
1631 }
1632 }
1633
1634
1635
1636
1637
1638
1639
1640 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1641
1642
1643
1644
1645
1646 start := nanotime()
1647 for nanotime()-start < timeout {
1648 lock(&s.scavenger.lock)
1649 parked := s.scavenger.parked
1650 unlock(&s.scavenger.lock)
1651 if parked {
1652 return true
1653 }
1654 Gosched()
1655 }
1656 return false
1657 }
1658
1659
1660 func (s *Scavenger) Released() uintptr {
1661 return s.released.Load()
1662 }
1663
1664
1665 func (s *Scavenger) Wake() {
1666 s.scavenger.wake()
1667 }
1668
1669
1670
1671 func (s *Scavenger) Stop() {
1672 lock(&s.scavenger.lock)
1673 parked := s.scavenger.parked
1674 unlock(&s.scavenger.lock)
1675 if !parked {
1676 panic("tried to clean up scavenger that is not parked")
1677 }
1678 close(s.stop)
1679 s.Wake()
1680 <-s.done
1681 }
1682
1683 type ScavengeIndex struct {
1684 i scavengeIndex
1685 }
1686
1687 func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1688 s := new(ScavengeIndex)
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700 s.i.chunks = make([]atomicScavChunkData, max)
1701 s.i.min.Store(uintptr(min))
1702 s.i.max.Store(uintptr(max))
1703 s.i.minHeapIdx.Store(uintptr(min))
1704 s.i.test = true
1705 return s
1706 }
1707
1708 func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1709 ci, off := s.i.find(force)
1710 return ChunkIdx(ci), off
1711 }
1712
1713 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1714 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1715 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1716
1717 if sc == ec {
1718
1719 s.i.alloc(sc, ei+1-si)
1720 } else {
1721
1722 s.i.alloc(sc, pallocChunkPages-si)
1723 for c := sc + 1; c < ec; c++ {
1724 s.i.alloc(c, pallocChunkPages)
1725 }
1726 s.i.alloc(ec, ei+1)
1727 }
1728 }
1729
1730 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1731 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1732 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1733
1734 if sc == ec {
1735
1736 s.i.free(sc, si, ei+1-si)
1737 } else {
1738
1739 s.i.free(sc, si, pallocChunkPages-si)
1740 for c := sc + 1; c < ec; c++ {
1741 s.i.free(c, 0, pallocChunkPages)
1742 }
1743 s.i.free(ec, 0, ei+1)
1744 }
1745 }
1746
1747 func (s *ScavengeIndex) ResetSearchAddrs() {
1748 for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1749 addr, marked := a.Load()
1750 if marked {
1751 a.StoreUnmark(addr, addr)
1752 }
1753 a.Clear()
1754 }
1755 s.i.freeHWM = minOffAddr
1756 }
1757
1758 func (s *ScavengeIndex) NextGen() {
1759 s.i.nextGen()
1760 }
1761
1762 func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1763 s.i.setEmpty(chunkIdx(ci))
1764 }
1765
1766 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1767 sc0 := scavChunkData{
1768 gen: gen,
1769 inUse: inUse,
1770 lastInUse: lastInUse,
1771 scavChunkFlags: scavChunkFlags(flags),
1772 }
1773 scp := sc0.pack()
1774 sc1 := unpackScavChunkData(scp)
1775 return sc0 == sc1
1776 }
1777
1778 const GTrackingPeriod = gTrackingPeriod
1779
1780 var ZeroBase = unsafe.Pointer(&zerobase)
1781
1782 const UserArenaChunkBytes = userArenaChunkBytes
1783
1784 type UserArena struct {
1785 arena *userArena
1786 }
1787
1788 func NewUserArena() *UserArena {
1789 return &UserArena{newUserArena()}
1790 }
1791
1792 func (a *UserArena) New(out *any) {
1793 i := efaceOf(out)
1794 typ := i._type
1795 if typ.Kind() != abi.Pointer {
1796 panic("new result of non-ptr type")
1797 }
1798 typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1799 i.data = a.arena.new(typ)
1800 }
1801
1802 func (a *UserArena) Slice(sl any, cap int) {
1803 a.arena.slice(sl, cap)
1804 }
1805
1806 func (a *UserArena) Free() {
1807 a.arena.free()
1808 }
1809
1810 func GlobalWaitingArenaChunks() int {
1811 n := 0
1812 systemstack(func() {
1813 lock(&mheap_.lock)
1814 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1815 n++
1816 }
1817 unlock(&mheap_.lock)
1818 })
1819 return n
1820 }
1821
1822 func UserArenaClone[T any](s T) T {
1823 return arena_heapify(s).(T)
1824 }
1825
1826 var AlignUp = alignUp
1827
1828 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1829 return blockUntilEmptyFinalizerQueue(timeout)
1830 }
1831
1832 func BlockUntilEmptyCleanupQueue(timeout int64) bool {
1833 return gcCleanups.blockUntilEmpty(timeout)
1834 }
1835
1836 func FrameStartLine(f *Frame) int {
1837 return f.startLine
1838 }
1839
1840
1841
1842 func PersistentAlloc(n, align uintptr) unsafe.Pointer {
1843 return persistentalloc(n, align, &memstats.other_sys)
1844 }
1845
1846 const TagAlign = tagAlign
1847
1848
1849
1850 func FPCallers(pcBuf []uintptr) int {
1851 return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
1852 }
1853
1854 const FramePointerEnabled = framepointer_enabled
1855
1856 var (
1857 IsPinned = isPinned
1858 GetPinCounter = pinnerGetPinCounter
1859 )
1860
1861 func SetPinnerLeakPanic(f func()) {
1862 pinnerLeakPanic = f
1863 }
1864 func GetPinnerLeakPanic() func() {
1865 return pinnerLeakPanic
1866 }
1867
1868 var testUintptr uintptr
1869
1870 func MyGenericFunc[T any]() {
1871 systemstack(func() {
1872 testUintptr = 4
1873 })
1874 }
1875
1876 func UnsafePoint(pc uintptr) bool {
1877 fi := findfunc(pc)
1878 v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
1879 switch v {
1880 case abi.UnsafePointUnsafe:
1881 return true
1882 case abi.UnsafePointSafe:
1883 return false
1884 case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
1885
1886
1887 return false
1888 default:
1889 var buf [20]byte
1890 panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
1891 }
1892 }
1893
1894 type TraceMap struct {
1895 traceMap
1896 }
1897
1898 func (m *TraceMap) PutString(s string) (uint64, bool) {
1899 return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
1900 }
1901
1902 func (m *TraceMap) Reset() {
1903 m.traceMap.reset()
1904 }
1905
1906 func SetSpinInGCMarkDone(spin bool) {
1907 gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
1908 }
1909
1910 func GCMarkDoneRestarted() bool {
1911
1912 mp := acquirem()
1913 if gcphase != _GCoff {
1914 releasem(mp)
1915 return false
1916 }
1917 restarted := gcDebugMarkDone.restartedDueTo27993
1918 releasem(mp)
1919 return restarted
1920 }
1921
1922 func GCMarkDoneResetRestartFlag() {
1923 mp := acquirem()
1924 for gcphase != _GCoff {
1925 releasem(mp)
1926 Gosched()
1927 mp = acquirem()
1928 }
1929 gcDebugMarkDone.restartedDueTo27993 = false
1930 releasem(mp)
1931 }
1932
1933 type BitCursor struct {
1934 b bitCursor
1935 }
1936
1937 func NewBitCursor(buf *byte) BitCursor {
1938 return BitCursor{b: bitCursor{ptr: buf, n: 0}}
1939 }
1940
1941 func (b BitCursor) Write(data *byte, cnt uintptr) {
1942 b.b.write(data, cnt)
1943 }
1944 func (b BitCursor) Offset(cnt uintptr) BitCursor {
1945 return BitCursor{b: b.b.offset(cnt)}
1946 }
1947
1948 const (
1949 BubbleAssocUnbubbled = bubbleAssocUnbubbled
1950 BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
1951 BubbleAssocOtherBubble = bubbleAssocOtherBubble
1952 )
1953
1954 type TraceStackTable traceStackTable
1955
1956 func (t *TraceStackTable) Reset() {
1957 t.tab.reset()
1958 }
1959
1960 func TraceStack(gp *G, tab *TraceStackTable) {
1961 traceStack(0, gp, (*traceStackTable)(tab))
1962 }
1963