Source file
src/runtime/export_test.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var ReadRandomFailed = &readRandomFailed

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32
var ParseByteCount = parseByteCount

var Nanotime = nanotime
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var CgoCheckPointer = cgoCheckPointer

const CrashStackImplemented = crashStackImplemented

const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames

var MapKeys = keys
var MapValues = values

var LockPartialOrder = lockPartialOrder

type TimeTimer = timeTimer

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)((*lfstack)(head).pop())
}
func LFNodeValidate(node *LFNode) {
	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
}

func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func GCMask(x any) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

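// RunSchedLocalQueueTest exercises a single P's local run queue: for each
// queue length it pushes elements with runqput, pops them with runqget, and
// throws if the queue ever returns the wrong element or fails to empty.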
func RunSchedLocalQueueTest() {
	pp := new(p)
	gs := make([]g, len(pp.runq))
	Escape(gs)
	for i := 0; i < len(pp.runq); i++ {
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(pp, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(pp); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

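// RunSchedLocalQueueStealTest exercises runqsteal: it fills one P's run queue,
// steals into a second P, and verifies that every element is observed exactly
// once and that roughly half of the elements were stolen.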
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs)
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

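// RunSchedLocalQueueEmptyTest checks that runqempty does not spuriously
// report an empty queue while another goroutine is concurrently putting to
// and getting from the same queue. iters is the number of attempts.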
func RunSchedLocalQueueEmptyTest(iters int) {
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like setTraceback, but it also raises the
// environment-derived traceback level, so later calls cannot lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stw := stopTheWorld(stwForTestCountPagesInUse)

	pagesInUse = mheap_.pagesInUse.Load()

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld(stw)

	return
}

func Fastrand() uint32          { return uint32(rand()) }
func Fastrand64() uint64        { return rand() }
func Fastrandn(n uint32) uint32 { return randn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(mode)
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

type CPUStats = cpuStats

func ReadCPUStats() CPUStats {
	return work.cpuStats
}

func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stw := stopTheWorld(stwForTestReadMetricsSlow)

	// Hold the metrics lock and make sure the metrics are initialized
	// before reading them.
	metricsLock()
	initMetrics()

	systemstack(func() {
		// Donate curg's race context to g0 for the duration of the read.
		getg().racectx = getg().m.curg.racectx

		// Read the metrics once up front.
		readMetricsLocked(samplesp, len, cap)

		// Read memstats while still on the system stack.
		readmemstats_m(memStats)

		// Read the metrics again after memstats.
		readMetricsLocked(samplesp, len, cap)

		// Undo the race context donation.
		getg().racectx = 0
	})
	metricsUnlock()

	startTheWorld(stw)
}

var DoubleCheckReadMemStats = &doubleCheckReadMemStats

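// ReadMemStatsSlow returns both the runtime-computed MemStats and a MemStats
// recomputed the slow way, by walking all spans and the heap statistics
// directly, so that tests can cross-check the two.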
func ReadMemStatsSlow() (base, slow MemStats) {
	stw := stopTheWorld(stwForTestReadMemStatsSlow)

	systemstack(func() {
		// Prevent allocation on this M while we read and recompute the stats.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we recompute below.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if s.isUnusedUserArenaChunk() {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees from the accumulated heap statistics.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		var smallFree uint64
		for i := 0; i < _NumSizeClasses; i++ {
			slow.Frees += m.smallFreeCount[i]
			bySize[i].Frees += m.smallFreeCount[i]
			bySize[i].Mallocs += m.smallFreeCount[i]
			smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
		}
		slow.Frees += m.tinyAllocCount + m.largeFreeCount
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		// Add up released memory recorded in the page allocator's chunks
		// and in each P's page cache.
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld(stw)
	return
}

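// ShrinkStackAndVerifyFramePointers enables stack poisoning, shrinks the
// current goroutine's stack, and then walks the frame pointer chain. If the
// new stack still contains frame pointers into the old, poisoned stack, the
// walk will crash.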
func ShrinkStackAndVerifyFramePointers() {
	before := stackPoisonCopy
	defer func() { stackPoisonCopy = before }()
	stackPoisonCopy = 1

	gp := getg()
	systemstack(func() {
		shrinkstack(gp)
	})

	// Walk the frame pointers; if any still point into the old stack,
	// this will crash because the old stack has been poisoned.
	FPCallers(make([]uintptr, 1024))
}

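// BlockOnSystemStack prints "x" and then self-deadlocks on a runtime mutex
// while running on the system stack, leaving the goroutine blocked there.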
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) Init() {
	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

func LockOSCounts() (external, internal uint32) {
	gp := getg()
	if gp.m.lockedExt+gp.m.lockedInt == 0 {
		if gp.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if gp.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return gp.m.lockedExt, gp.m.lockedInt
}

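// TracebackSystemstack records the PCs of the current call stack into stk
// after nesting i systemstack calls, and returns the number of PCs written.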
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
		var u unwinder
		u.initAt(pc, sp, 0, getg(), unwindJumpStack)
		return tracebackPCs(&u, 0, stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

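// MapNextArenaHint reserves a page at the next arena growth hint and returns
// the address range the next arena would occupy. ok reports whether the
// reservation landed at the hinted address; if it did not, the reservation
// that was obtained is released again.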
func MapNextArenaHint() (start, end uintptr, ok bool) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	got := sysReserve(unsafe.Pointer(addr), physPageSize)
	ok = (addr == uintptr(got))
	if !ok {
		// We didn't get the reservation we asked for; release whatever
		// we did get and report failure.
		sysFreeOS(got, physPageSize)
	}
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

func Getg() *G {
	return getg()
}

func Goid() uint64 {
	return getg().goid
}

func GIsWaitingOnMutex(gp *G) bool {
	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}

var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack

func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		g0 := getg()
		sp := sys.GetCallerSP()
		// Artificially shrink g0's recorded stack bounds so the overflow
		// below triggers without actually exhausting the system stack.
		g0.stack.lo = sp - 4096 - stackSystem
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func RunGetgThreadSwitchTest() {
	// Check that getg returns the same goroutine across a thread switch.
	// The spawned goroutine locks its thread after sending, so the receive
	// below is likely to resume this goroutine on a different thread.
	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on the channel receive; this is where a thread switch is
	// likely to happen.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Check again after some more control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

643 type PallocSum pallocSum
644
645 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
646 func (m PallocSum) Start() uint { return pallocSum(m).start() }
647 func (m PallocSum) Max() uint { return pallocSum(m).max() }
648 func (m PallocSum) End() uint { return pallocSum(m).end() }
649
650
651 type PallocBits pallocBits
652
653 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
654 return (*pallocBits)(b).find(npages, searchIdx)
655 }
656 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
657 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
658 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
659 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
660
661
662
663 func SummarizeSlow(b *PallocBits) PallocSum {
664 var start, most, end uint
665
666 const N = uint(len(b)) * 64
667 for start < N && (*pageBits)(b).get(start) == 0 {
668 start++
669 }
670 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
671 end++
672 }
673 run := uint(0)
674 for i := uint(0); i < N; i++ {
675 if (*pageBits)(b).get(i) == 0 {
676 run++
677 } else {
678 run = 0
679 }
680 most = max(most, run)
681 }
682 return PackPallocSum(start, most, end)
683 }
684
685
686 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
687
688
689
690 func DiffPallocBits(a, b *PallocBits) []BitRange {
691 ba := (*pageBits)(a)
692 bb := (*pageBits)(b)
693
694 var d []BitRange
695 base, size := uint(0), uint(0)
696 for i := uint(0); i < uint(len(ba))*64; i++ {
697 if ba.get(i) != bb.get(i) {
698 if size == 0 {
699 base = i
700 }
701 size++
702 } else {
703 if size != 0 {
704 d = append(d, BitRange{base, size})
705 }
706 size = 0
707 }
708 }
709 if size != 0 {
710 d = append(d, BitRange{base, size})
711 }
712 return d
713 }
714
715
716
717
718 func StringifyPallocBits(b *PallocBits, r BitRange) string {
719 str := ""
720 for j := r.I; j < r.I+r.N; j++ {
721 if (*pageBits)(b).get(j) != 0 {
722 str += "1"
723 } else {
724 str += "0"
725 }
726 }
727 return str
728 }
729
730
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// The page allocator requires the mheap lock to be held.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}

// ChunkIdx is an exported version of chunkIdx for testing.
type ChunkIdx chunkIdx

// PageAlloc is an exported wrapper around pageAlloc for testing.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// The page allocator requires the mheap lock to be held.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		// The page allocator requires the mheap lock to be held.
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		// The page allocator requires the mheap lock to be held.
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil, true)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// PallocData returns nil if the chunk's backing data has not been mapped.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates a new address range.
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the virtual address of the limit of the address range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals reports whether the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size in bytes of the address range.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

// testSysStat is the memory statistic used for test allocations in this file.
var testSysStat = &memstats.other_sys

// AddrRanges is a wrapper around addrRanges for testing.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges creates a new, empty addrRanges backed by testSysStat.
// The result is mutable: ranges may be added to it with Add.
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}

// MakeAddrRanges creates a new addrRanges populated with the ranges in a.
// The result is immutable: calling Add on it will throw.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	// The ranges are backed by an ordinary Go slice rather than
	// runtime-managed memory, so the result is marked immutable.
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

// Ranges returns a copy of the ranges described by the AddrRanges.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor to base. See addrRanges.findSucc.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRanges must be mutable, or a throw will occur.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the total number of bytes in the AddrRanges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint
}

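// NewPageAlloc creates a new page allocator for testing and populates it
// from the chunks map: for each chunk index, the listed BitRanges are marked
// as allocated, and the optional scav map marks ranges as scavenged.
// Free the result with FreePageAlloc.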
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// Initialize the page allocator in test mode with its own lock.
	p.init(new(mutex), testSysStat, true)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Grow the allocator's backing state to cover this chunk.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)

		// Start from a clean scavenged bitmap for this chunk.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Tell the scavenge index that every page in the chunk was
		// allocated and then freed, so it starts out tracking the chunk
		// as entirely free.
		p.scav.index.alloc(ci, pallocChunkPages)
		p.scav.index.free(ci, 0, pallocChunkPages)

		// Apply the requested scavenged ranges.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// setRange is skipped for empty ranges; it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply the requested allocated ranges.
		for _, s := range init {
			// allocRange is skipped for empty ranges; it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)

				// Keep the scavenge index in sync.
				p.scav.index.alloc(ci, s.N)
			}
		}

		// Update summaries and metadata to reflect the allocations above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}

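// FreePageAlloc releases the memory mapped by a PageAlloc created with
// NewPageAlloc and undoes its effect on the test memory statistics.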
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free the mapped summary memory.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	// Free the scavenge index's chunk metadata.
	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))

	// Undo the accounting of the summary memory in mappedReady and the
	// test memory statistic.
	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	// Free each chunk's bitmap allocation.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}

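// BaseChunkIdx is a convenient base chunk index to use in tests. It is
// chosen according to the size of the address space, and on AIX it is
// shifted by arenaBaseOffset.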
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

// PageBase returns the address of the page with index pageIdx relative to
// the start of chunk c.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	systemstack(func() {
		getg().m.mallocing++

		// Hold the heap lock so we can safely read the bitmaps.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			for j := 0; j < pallocChunkPages/64; j++ {
				// A page should never be both allocated and scavenged, so
				// mask allocated pages out of the scavenged bits and check
				// that nothing changed.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stw := stopTheWorld(stwForTestPageCachePagesLeaked)

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld(stw)
	return
}

var ProcYield = procyield
var OSYield = osyield

type Mutex = mutex

var Lock = lock
var Unlock = unlock

var MutexContended = mutexContended

func SemRootLock(addr *uint32) *mutex {
	root := semtable.rootFor(addr)
	return &root.lock
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return root.nwait.Load()
}

const SemTableSize = semTabSize

// SemTable is a wrapper around semTable exported for testing.
type SemTable struct {
	semTable
}

// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
// It reports whether there actually was a waiter to dequeue.
func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}

// MSpan is an exported wrapper around mspan for testing.
type MSpan mspan

// AllocMSpan allocates an mspan for testing.
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// FreeMSpan frees an mspan allocated with AllocMSpan.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uint16(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

const (
	TimeHistSubBucketBits = timeHistSubBucketBits
	TimeHistNumSubBuckets = timeHistNumSubBuckets
	TimeHistNumBuckets    = timeHistNumBuckets
	TimeHistMinBucketBits = timeHistMinBucketBits
	TimeHistMaxBucketBits = timeHistMaxBucketBits
)

type TimeHistogram timeHistogram

// Count returns the count in the given bucket and sub-bucket, and true.
// If bucket is negative it returns the underflow count and false; if the
// index is out of range it returns the overflow count and false.
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
	t := (*timeHistogram)(th)
	if bucket < 0 {
		return t.underflow.Load(), false
	}
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= len(t.counts) {
		return t.overflow.Load(), false
	}
	return t.counts[i].Load(), true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	return fingStatus.Load()&fingWait != 0
}

var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// GCTestIsReachable reports, as a bitmask, which of ptrs are reachable.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization            = gcBackgroundUtilization
	GCGoalUtilization                  = gcGoalUtilization
	DefaultHeapMinimum                 = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	// Escape the controller to the heap: its fields are accessed with
	// 64-bit atomics, which need an alignment a stack slot on 32-bit
	// platforms may not provide.
	g := Escape(new(GCController))
	g.gcControllerState.test = true
	g.init(int32(gcPercent), memoryLimit)
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan.Store(stackSize)
	c.globalsScan.Store(globalsSize)
	c.heapLive.Store(trigger)
	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive.Load()
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Triggered() uint64 {
	return c.triggered
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive.Add(d.HeapLive)
	c.heapScan.Add(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}

var alwaysFalse bool
var escapeSink any

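// Escape forces x to be heap allocated by making it appear to escape, and
// returns it unchanged. The conditional store below never executes, but it
// defeats the compiler's escape analysis.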
func Escape[T any](x T) T {
	if alwaysFalse {
		escapeSink = x
	}
	return x
}

// Acquirem blocks preemption.
func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}

const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

type GCCPULimiter struct {
	limiter gcCPULimiterState
}

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	// Escape the limiter to the heap: its fields are accessed with
	// 64-bit atomics, which need an alignment a stack slot on 32-bit
	// platforms may not provide.
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}

const ScavengePercent = scavengePercent

type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr
	scavenger scavengerState
	stop      chan<- struct{}
	done      <-chan struct{}
}

func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	// Install the test hooks.
	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	// Start the scavenger goroutine.
	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		// Park until woken, then scavenge and sleep, until told to stop.
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("timed out waiting for scavenger to get ready")
	}
}

// BlockUntilParked spins until the scavenger goroutine parks, or until
// timeout (in nanoseconds) elapses. It reports whether the scavenger parked
// in time.
func (s *Scavenger) BlockUntilParked(timeout int64) bool {
	// Spin, checking the parked flag under the scavenger's lock and
	// yielding between checks.
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&s.scavenger.lock)
		parked := s.scavenger.parked
		unlock(&s.scavenger.lock)
		if parked {
			return true
		}
		Gosched()
	}
	return false
}

// Released returns the total number of bytes released by the scavenger.
func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

// Wake wakes up a parked scavenger to keep running.
func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

// Stop cleans up the scavenger's resources. The scavenger must be parked
// for this to work.
func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	close(s.stop)
	s.Wake()
	<-s.done
}

type ScavengeIndex struct {
	i scavengeIndex
}

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)

	// Allocate a dense chunk array covering [0, max) so tests can index
	// chunks directly; only the range [min, max) is actually used.
	s.i.chunks = make([]atomicScavChunkData, max)
	s.i.min.Store(uintptr(min))
	s.i.max.Store(uintptr(max))
	s.i.minHeapIdx.Store(uintptr(min))
	s.i.test = true
	return s
}

func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
	ci, off := s.i.find(force)
	return ChunkIdx(ci), off
}

func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.alloc(c, pallocChunkPages)
		}
		s.i.alloc(ec, ei+1)
	}
}

func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.free(sc, si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.free(sc, si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.free(c, 0, pallocChunkPages)
		}
		s.i.free(ec, 0, ei+1)
	}
}

func (s *ScavengeIndex) ResetSearchAddrs() {
	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
		addr, marked := a.Load()
		if marked {
			a.StoreUnmark(addr, addr)
		}
		a.Clear()
	}
	s.i.freeHWM = minOffAddr
}

func (s *ScavengeIndex) NextGen() {
	s.i.nextGen()
}

func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
	s.i.setEmpty(chunkIdx(ci))
}

func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
	sc0 := scavChunkData{
		gen:            gen,
		inUse:          inUse,
		lastInUse:      lastInUse,
		scavChunkFlags: scavChunkFlags(flags),
	}
	scp := sc0.pack()
	sc1 := unpackScavChunkData(scp)
	return sc0 == sc1
}

const GTrackingPeriod = gTrackingPeriod

var ZeroBase = unsafe.Pointer(&zerobase)

const UserArenaChunkBytes = userArenaChunkBytes

type UserArena struct {
	arena *userArena
}

func NewUserArena() *UserArena {
	return &UserArena{newUserArena()}
}

func (a *UserArena) New(out *any) {
	i := efaceOf(out)
	typ := i._type
	if typ.Kind_&abi.KindMask != abi.Pointer {
		panic("new result of non-ptr type")
	}
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	i.data = a.arena.new(typ)
}

func (a *UserArena) Slice(sl any, cap int) {
	a.arena.slice(sl, cap)
}

func (a *UserArena) Free() {
	a.arena.free()
}

func GlobalWaitingArenaChunks() int {
	n := 0
	systemstack(func() {
		lock(&mheap_.lock)
		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
			n++
		}
		unlock(&mheap_.lock)
	})
	return n
}

func UserArenaClone[T any](s T) T {
	return arena_heapify(s).(T)
}

var AlignUp = alignUp

func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
	return blockUntilEmptyFinalizerQueue(timeout)
}

func FrameStartLine(f *Frame) int {
	return f.startLine
}

// PersistentAlloc allocates n bytes of memory that lives outside the Go
// heap. This memory is never freed; use it sparingly.
func PersistentAlloc(n uintptr) unsafe.Pointer {
	return persistentalloc(n, 0, &memstats.other_sys)
}

// FPCallers uses frame pointer unwinding to populate pcBuf with the return
// addresses of the physical frames on the stack and returns the count.
func FPCallers(pcBuf []uintptr) int {
	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
}

const FramePointerEnabled = framepointer_enabled

var (
	IsPinned      = isPinned
	GetPinCounter = pinnerGetPinCounter
)

func SetPinnerLeakPanic(f func()) {
	pinnerLeakPanic = f
}
func GetPinnerLeakPanic() func() {
	return pinnerLeakPanic
}

var testUintptr uintptr

func MyGenericFunc[T any]() {
	systemstack(func() {
		testUintptr = 4
	})
}

func UnsafePoint(pc uintptr) bool {
	fi := findfunc(pc)
	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
	switch v {
	case abi.UnsafePointUnsafe:
		return true
	case abi.UnsafePointSafe:
		return false
	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
		return false
	default:
		var buf [20]byte
		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
	}
}

type TraceMap struct {
	traceMap
}

func (m *TraceMap) PutString(s string) (uint64, bool) {
	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

func (m *TraceMap) Reset() {
	m.traceMap.reset()
}

func SetSpinInGCMarkDone(spin bool) {
	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
}

func GCMarkDoneRestarted() bool {
	mp := acquirem()
	if gcphase != _GCoff {
		releasem(mp)
		return false
	}
	restarted := gcDebugMarkDone.restartedDueTo27993
	releasem(mp)
	return restarted
}

func GCMarkDoneResetRestartFlag() {
	mp := acquirem()
	for gcphase != _GCoff {
		releasem(mp)
		Gosched()
		mp = acquirem()
	}
	gcDebugMarkDone.restartedDueTo27993 = false
	releasem(mp)
}