Source file
src/runtime/export_test.go
1
2
3
4
5
6
7 package runtime
8
9 import (
10 "internal/abi"
11 "internal/goarch"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/gc"
15 "internal/runtime/sys"
16 "unsafe"
17 )
18
// Exported aliases for unexported softfloat routines so they can be
// exercised from tests in package runtime_test.
var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

// Scheduler and syscall entry points exported for tests.
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var ReadRandomFailed = &readRandomFailed

var Fastlog2 = fastlog2

var ParseByteCount = parseByteCount

// Timing and netpoll helpers exported for tests.
var Nanotime = nanotime
var Cputicks = cputicks
var CyclesPerSecond = pprof_cyclesPerSecond
var NetpollBreak = netpollBreak
var Usleep = usleep

// OS page-size parameters as probed at startup.
var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

// Low-level memory move/clear primitives.
var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var CgoCheckPointer = cgoCheckPointer

const CrashStackImplemented = crashStackImplemented

const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames

// Map iteration helpers exported for tests.
var MapKeys = keys
var MapValues = values

var LockPartialOrder = lockPartialOrder
65
// TimeTimer exposes the runtime's timer representation to tests.
type TimeTimer = timeTimer

// LockRank wraps the internal lockRank type for tests.
type LockRank lockRank

// String returns the name of the lock rank.
func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

// LFNode mirrors the layout of the internal lfnode type; the two are
// converted via unsafe.Pointer below, so the layouts must match.
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}
80
// LFStackPush pushes node onto the lock-free stack rooted at head.
func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

// LFStackPop pops the most recently pushed node, or nil if the stack is empty.
func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)((*lfstack)(head).pop())
}

// LFNodeValidate checks that node is suitable for use on a lock-free stack.
func LFNodeValidate(node *LFNode) {
	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
}

// Netpoll polls the network, running on the system stack because netpoll
// must not run on a user goroutine stack.
func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

// PointerMask returns the GC pointer bitmask for the object x refers to.
func PointerMask(x any) (ret []byte) {
	systemstack(func() {
		ret = pointerMask(x)
	})
	return
}
104
// RunSchedLocalQueueTest checks a P's local run queue in isolation: for
// each batch size i it verifies the queue starts empty, pushes i entries,
// pops them all back, and verifies the queue is empty again.
func RunSchedLocalQueueTest() {
	pp := new(p)
	gs := make([]g, len(pp.runq))
	Escape(gs) // keep the gs heap-allocated; the run queue will hold pointers into gs
	for i := 0; i < len(pp.runq); i++ {
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty initially")
		}
		// Push the same g i times, then expect it back i times.
		for j := 0; j < i; j++ {
			runqput(pp, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(pp); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}
127
// RunSchedLocalQueueStealTest checks runqsteal: for queues of every size
// it fills p1, steals into p2, then verifies every goroutine was observed
// exactly once and that about half (i/2 or i/2+1) of the elements moved.
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs) // keep the gs heap-allocated; the run queues hold pointers into gs
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0 // sig counts how many times this g is seen
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0 // number of goroutines that ended up with p2
		if gp != nil {
			s++
			gp.sig++
		}
		// Drain the thief's queue.
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		// Drain what remains on the victim.
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}
171
// RunSchedLocalQueueEmptyTest stresses runqempty against a concurrent
// put/get sequence: a helper goroutine checks that the queue is never
// spuriously reported empty while it is known to hold at least one g.
func RunSchedLocalQueueEmptyTest(iters int) {
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs) // keep the gs heap-allocated; the run queue holds pointers into gs
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		// Vary the "next" hint on both puts to cover all combinations.
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			// Spin until both goroutines reach the rendezvous.
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			// gs[0] was put before the rendezvous, so the queue must
			// not appear empty here even while the main goroutine is
			// concurrently putting/getting.
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}
204
// Hash functions exported for testing.
var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

// UseAeshash points at the internal flag selecting the AES-based hash
// implementation, so tests can read (and restore) it.
var UseAeshash = &useAeshash
218
// MemclrBytes clears b in place using the runtime's no-heap-pointers
// memory clearer.
func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

// GostringW converts the UTF-16 data starting at &w[0] to a Go string,
// on the system stack.
// NOTE(review): gostringw only receives a pointer, so it presumably
// expects NUL-terminated data — confirm before relying on len(w).
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}
233
// Raw file descriptor I/O primitives exported for tests.
var Open = open
var Close = closefd
var Read = read
var Write = write

// Envs and SetEnvs expose the runtime's copy of the process environment.
func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod
245
246
247
248
// SetTracebackEnv sets the traceback level and additionally copies the
// freshly parsed setting into traceback_env, overriding the value derived
// from the GOTRACEBACK environment variable.
// NOTE(review): the permanence of this override versus later SetTraceback
// calls is inferred from the assignment — confirm against setTraceback.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

// CountPagesInUse returns the heap's pagesInUse counter alongside an
// independent count computed by walking all in-use spans, so tests can
// check the two agree. The world is stopped for a consistent view.
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stw := stopTheWorld(stwForTestCountPagesInUse)

	pagesInUse = mheap_.pagesInUse.Load()

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld(stw)

	return
}

// Thin wrappers over the runtime's random number generator.
func Fastrand() uint32          { return uint32(rand()) }
func Fastrand64() uint64        { return rand() }
func Fastrandn(n uint32) uint32 { return randn(n) }
276
// ProfBuf exposes the runtime's profiling buffer to tests.
type ProfBuf profBuf

// NewProfBuf returns a new profiling buffer with the given header size,
// buffer word count, and tag count.
func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

// Write appends one record (tag, timestamp, header words, stack) to the buffer.
func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

// Read modes for ProfBuf.Read.
const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

// Read reads from the buffer in the given mode; see profBuf.read for the
// meaning of the results.
func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(mode)
}

// Close closes the buffer.
func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
299
// CPUStats exposes the runtime's CPU statistics type.
type CPUStats = cpuStats

// ReadCPUStats returns the GC worker's accumulated CPU statistics.
func ReadCPUStats() CPUStats {
	return work.cpuStats
}

// ReadMetricsSlow reads both memstats and the runtime metrics with the
// world stopped so the two views are mutually consistent; tests use it to
// cross-check them. samplesp points at a []metrics.Sample header
// described by len and cap.
func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stw := stopTheWorld(stwForTestReadMetricsSlow)

	// Initialize the metrics beforehand so the reads below don't have to
	// do it while holding the metrics lock.
	metricsLock()
	initMetrics()

	systemstack(func() {
		// Donate the current goroutine's race context to g0 for the
		// duration, since the code below may be race-instrumented.
		getg().racectx = getg().m.curg.racectx

		// Read the metrics once before reading memstats, so that any
		// one-time work happens outside the consistent pair of reads
		// below.
		readMetricsLocked(samplesp, len, cap)

		// Read memstats first; it flushes stats that the metrics read
		// relies on.
		readmemstats_m(memStats)

		// Read the metrics again, now consistent with the flushed
		// memstats above.
		readMetricsLocked(samplesp, len, cap)

		// Undo the race-context donation.
		getg().racectx = 0
	})
	metricsUnlock()

	startTheWorld(stw)
}
344
var DoubleCheckReadMemStats = &doubleCheckReadMemStats

// ReadMemStatsSlow returns both the runtime-computed MemStats (base) and a
// second MemStats (slow) recomputed from scratch by walking all spans and
// the page allocator, for consistency checking in tests. The world is
// stopped for the duration.
func ReadMemStatsSlow() (base, slow MemStats) {
	stw := stopTheWorld(stwForTestReadMemStatsSlow)

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change while we read them.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're recomputing
		// independently below.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [gc.NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if s.isUnusedUserArenaChunk() {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				// Size class 0 is a large object: one allocation of
				// elemsize bytes.
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees by reading the consistent heap stats directly.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		// Collect per-sizeclass free counts.
		var smallFree uint64
		for i := 0; i < gc.NumSizeClasses; i++ {
			slow.Frees += m.smallFreeCount[i]
			bySize[i].Frees += m.smallFreeCount[i]
			bySize[i].Mallocs += m.smallFreeCount[i]
			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
		}
		slow.Frees += m.tinyAllocCount + m.largeFreeCount
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		// Recompute HeapReleased by popcounting the scavenged bitmaps of
		// every chunk the page allocator knows about...
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		// ...plus scavenged pages currently sitting in per-P page caches.
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld(stw)
	return
}
430
431
432
433
// ShrinkStackAndVerifyFramePointers shrinks the current goroutine's stack
// (with stack poisoning forced on) and then walks the frame-pointer chain
// to verify that the stack copy kept the chain intact.
func ShrinkStackAndVerifyFramePointers() {
	before := stackPoisonCopy
	defer func() { stackPoisonCopy = before }()
	stackPoisonCopy = 1

	gp := getg()
	systemstack(func() {
		shrinkstack(gp)
	})

	// If our new stack contains frame pointers into the old stack, this
	// walk will read poisoned memory and fail.
	FPCallers(make([]uintptr, 1024))
}

// BlockOnSystemStack switches to the system stack, prints "x" as a marker
// tests can look for, then blocks forever.
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	// The second lock on the same mutex can never succeed, so this
	// deadlocks deliberately and never returns.
	lock(&deadlock)
	lock(&deadlock)
}
460
// RWMutex wraps the runtime's rwmutex for testing.
type RWMutex struct {
	rw rwmutex
}

// Init initializes the lock with the test lock ranks.
func (rw *RWMutex) Init() {
	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
}

// RLock read-locks rw.
func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

// RUnlock undoes a single RLock.
func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

// Lock write-locks rw.
func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

// Unlock undoes a single Lock.
func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

// LockOSCounts returns the current M's external and internal LockOSThread
// counts, after checking they are consistent with the g's lockedm field.
func LockOSCounts() (external, internal uint32) {
	gp := getg()
	if gp.m.lockedExt+gp.m.lockedInt == 0 {
		if gp.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if gp.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return gp.m.lockedExt, gp.m.lockedInt
}
498
499
// TracebackSystemstack captures a traceback after recursively switching
// to the system stack i times, exercising unwindJumpStack across the
// systemstack switches. It returns the number of PCs written to stk.
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
		var u unwinder
		u.initAt(pc, sp, 0, getg(), unwindJumpStack)
		return tracebackPCs(&u, 0, stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

// KeepNArenaHints truncates the heap's arena hint list to at most n
// entries (no-op if the list is already shorter).
func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}
524
525
526
527
528
529
530
// MapNextArenaHint reserves a page at the heap's next arena growth hint
// and returns the arena address range that growth would cover. ok reports
// whether the reservation landed at the hinted address; on failure the
// stray reservation is released again.
func MapNextArenaHint() (start, end uintptr, ok bool) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		// Growing downward: the hint address is the top of the next arena.
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
	ok = (addr == uintptr(got))
	if !ok {
		// We were unable to get the requested reservation.
		// Release what we did get and fail.
		sysFreeOS(got, physPageSize)
	}
	return
}

// GetNextArenaHint returns the address of the next arena growth hint.
func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}
553
// G and Sudog expose the scheduler's goroutine types to tests.
type G = g

type Sudog = sudog

// Getg returns the current goroutine.
func Getg() *G {
	return getg()
}

// Goid returns the current goroutine's ID.
func Goid() uint64 {
	return getg().goid
}

// GIsWaitingOnMutex reports whether gp is parked waiting on a runtime mutex.
func GIsWaitingOnMutex(gp *G) bool {
	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}

var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
572
// PanicForTesting indexes b at i from inside the runtime package; an
// out-of-range i raises the usual bounds-check panic from a runtime
// frame, which tests can then observe. In-range i returns b[i].
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

// unexportedPanicForTesting is the unexported frame in which the index
// (and any resulting panic) actually occurs.
func unexportedPanicForTesting(b []byte, i int) byte {
	v := b[i]
	return v
}
581
// G0StackOverflow shrinks g0's recorded stack bounds to a small window
// below the current SP and then overflows it, to exercise the runtime's
// handling of a g0 stack overflow.
func G0StackOverflow() {
	systemstack(func() {
		g0 := getg()
		sp := sys.GetCallerSP()
		// Force a stack overflow by pretending the stack is much smaller
		// than it really is: only 4096 bytes (plus the system slop)
		// below the current SP.
		g0.stack.lo = sp - 4096 - stackSystem
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		stackOverflow(nil)
	})
}

// stackOverflow recurses unboundedly, consuming 256 bytes of stack per
// frame, until the stack guard is hit. The parameter keeps buf live.
func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}
601
// RunGetgThreadSwitchTest checks that getg() stays correct across points
// where the goroutine may be rescheduled onto a different OS thread
// (here, a blocking channel receive).
func RunGetgThreadSwitchTest() {
	// Launch a goroutine that hands us a value and then locks itself to
	// its thread, encouraging the scheduler to move this goroutine to a
	// different thread when it resumes.
	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block; after this receive we may be running on another OS thread,
	// but we must still be the same goroutine.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Check again after another potential reschedule point.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}
635
// Page allocator constants exported for tests.
const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

// PallocSum exposes the packed palloc summary type.
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }

// PallocBits exposes the per-chunk page allocation bitmap.
type PallocBits pallocBits

// Find searches for npages contiguous free pages starting at searchIdx;
// see (*pallocBits).find for the meaning of the two results.
func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
661
662
663
// SummarizeSlow is a brute-force reference implementation of
// (*pallocBits).summarize: it computes the leading zero-bit count
// (start), the longest zero-bit run anywhere (most), and the trailing
// zero-bit count (end). Tests compare it against the optimized Summarize.
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, most, end uint

	const N = uint(len(b)) * 64
	// Count leading zero bits.
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	// Count trailing zero bits.
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	// Scan for the longest zero-bit run anywhere in the bitmap.
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		most = max(most, run)
	}
	return PackPallocSum(start, most, end)
}
685
686
// FindBitRange64 exposes findBitRange64 for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// DiffPallocBits returns the positions where a and b differ, coalesced
// into maximal BitRange runs.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0) // current run of differing bits
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	// Flush a run that extends through the final bit.
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}
715
716
717
718
719 func StringifyPallocBits(b *PallocBits, r BitRange) string {
720 str := ""
721 for j := r.I; j < r.I+r.N; j++ {
722 if (*pageBits)(b).get(j) != 0 {
723 str += "1"
724 } else {
725 str += "0"
726 }
727 }
728 return str
729 }
730
731
// PallocData exposes the per-chunk allocation + scavenge bitmap pair.
type PallocData pallocData

// FindScavengeCandidate wraps the internal scavenge-candidate search;
// see (*pallocData).findScavengeCandidate for the result semantics.
func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// FillAligned exposes fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// PageCache exposes the per-P page cache.
type PageCache pageCache

const PageCachePages = pageCachePages

// NewPageCache constructs a page cache directly from its fields.
func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}

// Flush returns the cache's pages to the page allocator, taking the mheap
// lock on the system stack as the runtime requires for this lock.
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// None of the tests need higher-level locking, so take the
		// mheap lock directly.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}
778
779
// ChunkIdx is an index into the page allocator's chunk map.
type ChunkIdx chunkIdx

// PageAlloc exposes the page allocator for white-box testing.
type PageAlloc pageAlloc

// Alloc allocates npages pages, returning the base address and the
// scavenged byte count reported by the allocator.
func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// None of the tests need higher-level locking, so take the
		// mheap lock directly.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}

// AllocToCache fills and returns a PageCache from the allocator.
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}

// Free returns npages pages starting at base to the allocator.
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}

// Bounds returns the [start, end) chunk index range known to the allocator.
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}

// Scavenge releases up to nbytes of free memory back to the OS and
// returns how much was actually released.
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil, true)
	})
	return
}

// InUse returns a copy of the allocator's in-use address ranges.
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// PallocData returns the bitmap data for chunk i, or nil if the chunk
// does not exist.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}
846
847
// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates an address range covering [base, limit).
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the virtual limit address of the range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals reports whether the two ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size of the range in bytes.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

// testSysStat is the sysStat bucket that test allocations are accounted
// against; using other_sys keeps them out of the interesting stats.
var testSysStat = &memstats.other_sys

// AddrRanges is a wrapper around addrRanges for testing. mutable records
// whether the backing store may be modified through Add.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges returns a new, empty, mutable AddrRanges whose backing
// store is managed via the runtime's own allocator (init with testSysStat).
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}
903
904
905
906
907
908
// MakeAddrRanges creates an immutable AddrRanges from the given ranges,
// backed by ordinary Go-heap memory rather than runtime-managed memory.
// NOTE(review): the input is assumed to already satisfy addrRanges'
// ordering invariants — confirm with callers.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	// mutable is false: the Go-heap backing store must not be grown
	// through add, which expects runtime-managed memory.
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

// Ranges returns a copy of the ranges described by a.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor index for base; see addrRanges.findSucc.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add inserts a new address range. It throws if a was created immutable
// via MakeAddrRanges.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the total bytes covered by all ranges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

// BitRange describes a run of N bits starting at bit index I.
type BitRange struct {
	I, N uint
}
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
// NewPageAlloc creates a page allocator for testing and populates it from
// chunks: each key is a chunk index and each value a set of allocated
// page ranges within that chunk. The optional scav map additionally marks
// page ranges as scavenged. Free the result with FreePageAlloc.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// Initialize in test mode with its own lock and stat bucket.
	p.init(new(mutex), testSysStat, true)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		// Initialize the bitmap and update pageAlloc metadata.
		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)

		// Clear the scavenged bits that grow leaves set.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Simulate an alloc-then-free of the whole chunk against the
		// scavenge index to bring it to a coherent baseline state.
		p.scav.index.alloc(ci, pallocChunkPages)
		p.scav.index.free(ci, 0, pallocChunkPages)

		// Apply the requested scavenged ranges, if any.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Skip s.N == 0: setRange doesn't handle it and
					// it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply the requested allocation state.
		for _, s := range init {
			// Skip s.N == 0: allocRange doesn't handle it and it's a
			// no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)

				// Keep the scavenge index in sync with the allocation.
				p.scav.index.alloc(ci, s.N)
			}
		}

		// Update the allocator's summaries for the bitmap changes above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}
1043
1044
1045
1046
// FreePageAlloc releases the OS-level mappings and other resources owned
// by a pageAlloc created with NewPageAlloc, and undoes the memory-stat
// accounting its initialization performed.
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		// 64-bit: each summary level is its own reservation.
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
	} else {
		// 32-bit: the summaries share one contiguous reservation
		// starting at summary[0].
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	// Free the scavenge index backing store.
	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))

	// Subtract back out what the summaries added to mappedReady and the
	// test stat bucket; sysFreeOS above does not undo that accounting.
	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}
1082
1083
1084
1085
1086
1087
1088
// BaseChunkIdx is a convenient chunk index for tests to build addresses
// from: 0xc000... on 64-bit platforms and 0x100... on 32-bit, shifted by
// the arena base offset on AIX.
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

// PageBase returns the address of page pageIdx within chunk c.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}
1108
// BitsMismatch records one 64-page group whose scavenged bits disagree
// with the expected value.
type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

// CheckScavengedBitsCleared verifies that no page is simultaneously
// allocated (pallocBits set) and marked scavenged. Up to len(mismatches)
// violations are recorded; n is how many were recorded and ok is false if
// any violation was found.
func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation while
	// stats must not change.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so we can safely read the bitmaps.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			for j := 0; j < pallocChunkPages/64; j++ {
				// Expected scavenged bits: only those not covered by
				// an allocation.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}
1157
// PageCachePagesLeaked counts pages still held in the page caches of dead
// Ps (the tail of allp beyond len(allp)), which should have been flushed
// when those Ps were destroyed. The world is stopped for a stable view.
func PageCachePagesLeaked() (leaked uintptr) {
	stw := stopTheWorld(stwForTestPageCachePagesLeaked)

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		// Just ignore them.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld(stw)
	return
}
1174
// Spin/yield primitives exported for tests.
var ProcYield = procyield
var OSYield = osyield

// Mutex exposes the runtime mutex and its lock/unlock operations.
type Mutex = mutex

var Lock = lock
var Unlock = unlock

var MutexContended = mutexContended

// SemRootLock returns the lock of the semaphore root for addr.
func SemRootLock(addr *uint32) *mutex {
	root := semtable.rootFor(addr)
	return &root.lock
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

// SemNwait returns the number of waiters on the semaphore root for addr.
func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return root.nwait.Load()
}

const SemTableSize = semTabSize

// SemTable wraps the runtime's semaphore table for testing.
type SemTable struct {
	semTable
}

// Enqueue enqueues a fresh sudog as a waiter for the semaphore at addr.
func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	// Zero the timing/ticket state a recycled sudog may carry.
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

// Dequeue removes one waiter for the semaphore at addr, releasing its
// sudog. It reports whether there actually was a waiter to dequeue.
func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}
1225
1226
// MSpan exposes the runtime's span type.
type MSpan mspan

// AllocMSpan allocates a zero-initialized mspan from the heap's span
// allocator. Free it with FreeMSpan.
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		s.init(0, 0)
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// FreeMSpan returns s to the heap's span allocator.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

// MSpanCountAlloc temporarily installs bits as the span's gcmarkBits
// (8 objects per byte), counts the set bits via countAlloc, then detaches
// the bitmap again before returning the count.
func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uint16(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}
1258
// MSpanQueue exposes the runtime's span queue.
type MSpanQueue mSpanQueue

// Size returns the number of spans in the queue.
func (q *MSpanQueue) Size() int {
	return (*mSpanQueue)(q).n
}

// Push adds s to the queue.
func (q *MSpanQueue) Push(s *MSpan) {
	(*mSpanQueue)(q).push((*mspan)(s))
}

// Pop removes and returns one span, or nil if the queue is empty.
func (q *MSpanQueue) Pop() *MSpan {
	s := (*mSpanQueue)(q).pop()
	return (*MSpan)(s)
}

// TakeAll moves every span from p into q.
func (q *MSpanQueue) TakeAll(p *MSpanQueue) {
	(*mSpanQueue)(q).takeAll((*mSpanQueue)(p))
}

// PopN removes n spans from q (fewer if q is shorter; see
// mSpanQueue.popN) and returns them as a new queue.
func (q *MSpanQueue) PopN(n int) MSpanQueue {
	p := (*mSpanQueue)(q).popN(n)
	return (MSpanQueue)(p)
}
1282
// Time histogram layout constants exported for tests.
const (
	TimeHistSubBucketBits = timeHistSubBucketBits
	TimeHistNumSubBuckets = timeHistNumSubBuckets
	TimeHistNumBuckets    = timeHistNumBuckets
	TimeHistMinBucketBits = timeHistMinBucketBits
	TimeHistMaxBucketBits = timeHistMaxBucketBits
)

// TimeHistogram exposes the runtime's duration histogram.
type TimeHistogram timeHistogram

// Count returns the count for the given (bucket, subBucket) pair and true.
// For a negative bucket it returns the underflow count and false; for an
// out-of-range index it returns the overflow count and false.
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
	t := (*timeHistogram)(th)
	if bucket < 0 {
		return t.underflow.Load(), false
	}
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= len(t.counts) {
		return t.overflow.Load(), false
	}
	return t.counts[i].Load(), true
}

// Record adds duration to the histogram.
func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

// SetIntArgRegs sets the number of integer argument registers (only when
// a >= 0) and returns the previous value. finlock guards intArgRegs here.
func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

// FinalizerGAsleep reports whether the finalizer goroutine is parked
// waiting for work (fingWait bit set).
func FinalizerGAsleep() bool {
	return fingStatus.Load()&fingWait != 0
}
1328
1329
1330
1331
// GCTestMoveStackOnNextCall exposes gcTestMoveStackOnNextCall, which
// arranges for the stack to move on the next call.
var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// GCTestIsReachable wraps gcTestIsReachable; see that function for the
// meaning of the returned mask over ptrs.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

// GCTestPointerClass returns the classification string that
// gcTestPointerClass assigns to p.
func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}
1349
const Raceenabled = raceenabled

// GC pacer constants exported for tests.
const (
	GCBackgroundUtilization            = gcBackgroundUtilization
	GCGoalUtilization                  = gcGoalUtilization
	DefaultHeapMinimum                 = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
)

// GCController wraps the GC pacer state for white-box testing.
type GCController struct {
	gcControllerState
}

// NewGCController returns a pacer in test mode initialized with the given
// GC percent and memory limit.
func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	// Force the controller to escape: we do 64-bit atomics on its fields,
	// and stack allocation on a 32-bit architecture could leave them
	// unaligned.
	g := Escape(new(GCController))
	g.gcControllerState.test = true // mark as a test copy
	g.init(int32(gcPercent), memoryLimit)
	return g
}

// StartCycle starts a simulated GC cycle with the given scan state,
// seeding heapLive at the (clamped) trigger point.
func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan.Store(stackSize)
	c.globalsScan.Store(globalsSize)
	c.heapLive.Store(trigger)
	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

// AssistWorkPerByte returns the current assist ratio.
func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

// HeapGoal returns the pacer's current heap goal.
func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

// HeapLive returns the pacer's heapLive value.
func (c *GCController) HeapLive() uint64 {
	return c.heapLive.Load()
}

// HeapMarked returns the pacer's heapMarked value.
func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

// Triggered returns the pacer's triggered value.
func (c *GCController) Triggered() uint64 {
	return c.triggered
}

// GCControllerReviseDelta describes increments to apply to the pacer's
// counters before recomputing the assist ratio.
type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

// Revise applies d to the pacer's counters and recomputes the assist ratio.
func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive.Add(d.HeapLive)
	c.heapScan.Add(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

// EndCycle ends a simulated GC cycle and commits new pacing parameters.
func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

// Idle mark worker accounting wrappers.
func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}
1446
// alwaysFalse is never set to true, but the compiler cannot prove that,
// so the conditional store in Escape forces its argument to escape.
var (
	alwaysFalse bool
	escapeSink  any
)

// Escape returns x unchanged while guaranteeing that the compiler treats
// x as escaping to the heap.
func Escape[T any](x T) T {
	if !alwaysFalse {
		return x
	}
	// Unreachable at run time, but the compiler must assume the store
	// below can happen, so x escapes.
	escapeSink = x
	return x
}
1456
1457
// Acquirem pins the current goroutine to its M; pair with Releasem.
func Acquirem() {
	acquirem()
}

// Releasem undoes Acquirem.
func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

// PIController wraps the runtime's PI controller for testing.
type PIController struct {
	piController
}

// NewPIController returns a PI controller with the given gains and
// [min, max] output clamp.
func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

// Next advances the controller one step; see piController.next for the
// result semantics.
func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}
1485
// GC CPU limiter constants exported for tests.
const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

// GCCPULimiter wraps the GC CPU limiter state for white-box testing.
type GCCPULimiter struct {
	limiter gcCPULimiterState
}

// NewGCCPULimiter returns a limiter in test mode with capacity derived
// from gomaxprocs as of time now.
func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	// Force the limiter to escape: we do 64-bit atomics on its fields,
	// and stack allocation on a 32-bit architecture could leave them
	// unaligned.
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

// Fill returns the limiter bucket's current fill level.
func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

// Capacity returns the limiter bucket's capacity.
func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

// Overflow returns the limiter's accumulated overflow.
func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

// Limiting reports whether the limiter is currently active.
func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

// NeedUpdate reports whether the limiter needs an update as of now.
func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

// StartGCTransition marks the beginning of a GC on/off transition.
func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

// FinishGCTransition marks the end of a GC transition.
func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

// Update advances the limiter's accounting to now.
func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

// AddAssistTime credits mark assist time to the limiter.
func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

// ResetCapacity resets the limiter's capacity from a new nprocs.
func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}
1545
// ScavengePercent is an exported copy of scavengePercent.
const ScavengePercent = scavengePercent

// Scavenger is a test harness around scavengerState. All four function
// fields must be populated before Start is called; they stub out the
// scavenger's interactions with the rest of the runtime.
type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr // total bytes reported released by run()
	scavenger scavengerState
	stop      chan<- struct{} // closed by Stop to end the run loop
	done      <-chan struct{} // closed by the run loop on exit
}
1559
// Start installs the stub functions into the underlying scavenger
// state and launches its run loop in a new goroutine. It panics if any
// stub is unset, and blocks (up to one second) until the goroutine has
// parked, so the Scavenger is ready to be woken when Start returns.
func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	// Install the test stubs into the real scavenger state.
	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	// stop requests shutdown; done signals the goroutine has exited.
	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		// Initialize and immediately park, waiting for a Wake.
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				// Nothing released this cycle; park until woken again.
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("timed out waiting for scavenger to get ready")
	}
}
1600
1601
1602
1603
1604
1605
1606
1607 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1608
1609
1610
1611
1612
1613 start := nanotime()
1614 for nanotime()-start < timeout {
1615 lock(&s.scavenger.lock)
1616 parked := s.scavenger.parked
1617 unlock(&s.scavenger.lock)
1618 if parked {
1619 return true
1620 }
1621 Gosched()
1622 }
1623 return false
1624 }
1625
1626
// Released returns the total number of bytes the scavenger has
// reported as released since Start.
func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

// Wake unparks the scavenger goroutine so it runs another cycle.
func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

// Stop shuts down the scavenger goroutine and waits for it to exit.
// The scavenger must already be parked (e.g. via BlockUntilParked),
// otherwise Stop panics.
func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	// Close stop before waking so the run loop observes the shutdown
	// request on its next iteration, then wait for it to exit.
	close(s.stop)
	s.Wake()
	<-s.done
}
1649
// ScavengeIndex wraps scavengeIndex for testing.
type ScavengeIndex struct {
	i scavengeIndex
}

// NewScavengeIndex creates a scavenge index in test mode covering
// chunk indices [min, max).
func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)
	// Eagerly allocate chunk metadata for all chunks below max, rather
	// than mapping it on demand as the real heap does. NOTE(review):
	// the upstream source has a longer comment here explaining this
	// setup — consult it before changing the sizing.
	s.i.chunks = make([]atomicScavChunkData, max)
	s.i.min.Store(uintptr(min))
	s.i.max.Store(uintptr(max))
	s.i.minHeapIdx.Store(uintptr(min))
	s.i.test = true
	return s
}
1674
// Find returns the next chunk and page index to scavenge; force is
// passed through to scavengeIndex.find.
func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
	ci, off := s.i.find(force)
	return ChunkIdx(ci), off
}
1679
1680 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1681 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1682 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1683
1684 if sc == ec {
1685
1686 s.i.alloc(sc, ei+1-si)
1687 } else {
1688
1689 s.i.alloc(sc, pallocChunkPages-si)
1690 for c := sc + 1; c < ec; c++ {
1691 s.i.alloc(c, pallocChunkPages)
1692 }
1693 s.i.alloc(ec, ei+1)
1694 }
1695 }
1696
1697 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1698 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1699 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1700
1701 if sc == ec {
1702
1703 s.i.free(sc, si, ei+1-si)
1704 } else {
1705
1706 s.i.free(sc, si, pallocChunkPages-si)
1707 for c := sc + 1; c < ec; c++ {
1708 s.i.free(c, 0, pallocChunkPages)
1709 }
1710 s.i.free(ec, 0, ei+1)
1711 }
1712 }
1713
// ResetSearchAddrs resets the index's background and forced search
// addresses and clears the free-page high-water mark.
func (s *ScavengeIndex) ResetSearchAddrs() {
	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
		addr, marked := a.Load()
		if marked {
			// Commit the marked address first so Clear starts from a
			// consistent value.
			a.StoreUnmark(addr, addr)
		}
		a.Clear()
	}
	s.i.freeHWM = minOffAddr
}
1724
// NextGen advances the index to the next scavenge generation.
func (s *ScavengeIndex) NextGen() {
	s.i.nextGen()
}

// SetEmpty marks chunk ci as empty in the index.
func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
	s.i.setEmpty(chunkIdx(ci))
}
1732
1733 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1734 sc0 := scavChunkData{
1735 gen: gen,
1736 inUse: inUse,
1737 lastInUse: lastInUse,
1738 scavChunkFlags: scavChunkFlags(flags),
1739 }
1740 scp := sc0.pack()
1741 sc1 := unpackScavChunkData(scp)
1742 return sc0 == sc1
1743 }
1744
// GTrackingPeriod is an exported copy of gTrackingPeriod.
const GTrackingPeriod = gTrackingPeriod

// ZeroBase points at the runtime's zerobase, the address returned for
// zero-sized allocations.
var ZeroBase = unsafe.Pointer(&zerobase)

// UserArenaChunkBytes is an exported copy of userArenaChunkBytes.
const UserArenaChunkBytes = userArenaChunkBytes

// UserArena wraps userArena for testing.
type UserArena struct {
	arena *userArena
}

// NewUserArena creates a new, empty user arena.
func NewUserArena() *UserArena {
	return &UserArena{newUserArena()}
}
1758
// New allocates a value in the arena and stores a pointer to it
// through out. out must hold a value of pointer type; otherwise New
// panics.
func (a *UserArena) New(out *any) {
	i := efaceOf(out)
	typ := i._type
	if typ.Kind_&abi.KindMask != abi.Pointer {
		panic("new result of non-ptr type")
	}
	// Unwrap the pointer type to get the element type to allocate.
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	i.data = a.arena.new(typ)
}

// Slice allocates a slice with the given capacity in the arena,
// storing it through sl (see userArena.slice for the accepted forms).
func (a *UserArena) Slice(sl any, cap int) {
	a.arena.slice(sl, cap)
}

// Free releases the arena's memory back to the runtime.
func (a *UserArena) Free() {
	a.arena.free()
}
1776
1777 func GlobalWaitingArenaChunks() int {
1778 n := 0
1779 systemstack(func() {
1780 lock(&mheap_.lock)
1781 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1782 n++
1783 }
1784 unlock(&mheap_.lock)
1785 })
1786 return n
1787 }
1788
// UserArenaClone returns a copy of s backed by ordinary Go heap
// memory, via the runtime's arena heapify hook.
func UserArenaClone[T any](s T) T {
	return arena_heapify(s).(T)
}

// AlignUp is an exported copy of alignUp.
var AlignUp = alignUp

// BlockUntilEmptyFinalizerQueue blocks until the finalizer queue is
// empty or timeout nanoseconds elapse, reporting whether it emptied
// in time.
func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
	return blockUntilEmptyFinalizerQueue(timeout)
}

// BlockUntilEmptyCleanupQueue is the cleanup-queue analogue of
// BlockUntilEmptyFinalizerQueue.
func BlockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}

// FrameStartLine returns the start line of the function in f.
func FrameStartLine(f *Frame) int {
	return f.startLine
}
1806
1807
1808
// PersistentAlloc allocates n bytes with the given alignment from the
// runtime's persistent (never-freed) allocator, accounted against
// memstats.other_sys.
func PersistentAlloc(n, align uintptr) unsafe.Pointer {
	return persistentalloc(n, align, &memstats.other_sys)
}

// TagAlign is an exported copy of tagAlign.
const TagAlign = tagAlign

// FPCallers unwinds the stack using frame pointers, starting from the
// caller's frame pointer, filling pcBuf and returning the number of
// PCs written.
func FPCallers(pcBuf []uintptr) int {
	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
}

// FramePointerEnabled reports whether this build keeps frame pointers.
const FramePointerEnabled = framepointer_enabled

// Exported pinner helpers for tests.
var (
	IsPinned      = isPinned
	GetPinCounter = pinnerGetPinCounter
)
1827
// SetPinnerLeakPanic replaces the function invoked when a Pinner leak
// is detected.
func SetPinnerLeakPanic(f func()) {
	pinnerLeakPanic = f
}

// GetPinnerLeakPanic returns the current pinner-leak panic function.
func GetPinnerLeakPanic() func() {
	return pinnerLeakPanic
}

// testUintptr is a sink for MyGenericFunc's systemstack write.
var testUintptr uintptr

// MyGenericFunc exercises calling systemstack from within a generic
// function; the type parameter is intentionally unused.
func MyGenericFunc[T any]() {
	systemstack(func() {
		testUintptr = 4
	})
}
1842
1843 func UnsafePoint(pc uintptr) bool {
1844 fi := findfunc(pc)
1845 v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
1846 switch v {
1847 case abi.UnsafePointUnsafe:
1848 return true
1849 case abi.UnsafePointSafe:
1850 return false
1851 case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
1852
1853
1854 return false
1855 default:
1856 var buf [20]byte
1857 panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
1858 }
1859 }
1860
// TraceMap wraps traceMap for testing.
type TraceMap struct {
	traceMap
}

// PutString inserts s into the map, returning its ID and the second
// result of traceMap.put (NOTE(review): presumably whether s was newly
// added — confirm against the implementation).
func (m *TraceMap) PutString(s string) (uint64, bool) {
	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

// Reset clears the map.
func (m *TraceMap) Reset() {
	m.traceMap.reset()
}

// SetSpinInGCMarkDone toggles a debug flag that makes gcMarkDone spin
// after its ragged barrier; used by tests.
func SetSpinInGCMarkDone(spin bool) {
	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
}
1876
// GCMarkDoneRestarted reports whether the mark-done protocol restarted
// (the issue-27993 debug flag). The flag is only meaningful outside a
// GC cycle; during a cycle this returns false.
func GCMarkDoneRestarted() bool {
	// Disable preemption so gcphase cannot change under us.
	mp := acquirem()
	if gcphase != _GCoff {
		releasem(mp)
		return false
	}
	restarted := gcDebugMarkDone.restartedDueTo27993
	releasem(mp)
	return restarted
}

// GCMarkDoneResetRestartFlag clears the mark-done restart flag,
// waiting for any in-progress GC cycle to finish first.
func GCMarkDoneResetRestartFlag() {
	mp := acquirem()
	for gcphase != _GCoff {
		// A cycle is running: re-enable preemption, yield so the GC
		// can make progress, then re-check.
		releasem(mp)
		Gosched()
		mp = acquirem()
	}
	gcDebugMarkDone.restartedDueTo27993 = false
	releasem(mp)
}
1899
// BitCursor wraps bitCursor for testing.
type BitCursor struct {
	b bitCursor
}

// NewBitCursor returns a cursor positioned at bit 0 of buf.
func NewBitCursor(buf *byte) BitCursor {
	return BitCursor{b: bitCursor{ptr: buf, n: 0}}
}

// Write copies cnt bits from data to the cursor's position.
func (b BitCursor) Write(data *byte, cnt uintptr) {
	b.b.write(data, cnt)
}

// Offset returns a cursor advanced by cnt bits.
func (b BitCursor) Offset(cnt uintptr) BitCursor {
	return BitCursor{b: b.b.offset(cnt)}
}

// Exported bubble-association states, for tests.
const (
	BubbleAssocUnbubbled     = bubbleAssocUnbubbled
	BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
	BubbleAssocOtherBubble   = bubbleAssocOtherBubble
)
1920
View as plain text