src/runtime/stack.go
package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"math/bits"
	"unsafe"
)
const (
	// stackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	stackMin = 2048

	// The minimum stack size to allocate.
	// The hackery below rounds stackMin+stackSystem up to a power of 2.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	//
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackNosplit.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for a stackNosplit chain of NOSPLIT calls
	// plus one stackSmall frame plus stackSystem bytes for the OS.
	//
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)
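// Editor's note (illustrative sketch, not part of the original source):
// the fixedStack0..fixedStack6 chain is the usual bit-smearing trick for
// rounding stackMin+stackSystem up to the next power of two at compile
// time. For example, on a hypothetical system with stackSystem = 4096:
//
//	fixedStack0 = 2048 + 4096 = 6144
//	fixedStack1 = 6143        = 0x17ff
//	fixedStack6 = 0x1fff        (after OR-ing in all the shifted copies)
//	fixedStack  = 0x1fff + 1  = 8192
//
// With stackSystem = 0 the result stays 2048. An equivalent standalone
// computation using math/bits (assumption: this helper is illustrative
// only and does not exist in the runtime):
//
//	func roundUpPow2(n uint64) uint64 {
//		if n <= 1 {
//			return 1
//		}
//		return 1 << bits.Len64(n-1) // smallest power of two >= n
//	}
//
// stackGuard then adds up the separate budgets: room for a nosplit call
// chain, the OS extra (stackSystem), and one small frame (abi.StackSmall)
// that is allowed to run below the guard.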

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

var (
	stackPoisonCopy = 0 // fill stacks that should not be accessed with garbage, to detect bad dereferences during copy
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)
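// Editor's note (illustrative, not part of the original source): the
// uintptrMask & -N pattern keeps the low word of the two's-complement
// encoding of -N, so the sentinels are enormous "addresses" near the top
// of the address space:
//
//	stackPreempt   = 0xfffffade (32-bit) / 0xfffffffffffffade (64-bit)
//	stackFork      = 0xfffffb2e          / 0xfffffffffffffb2e
//	stackForceMove = 0xfffffeed          / 0xfffffffffffffeed
//
// Every real stack pointer is far below these values, so storing one of
// them into g.stackguard0 guarantees that the next prologue check
// "SP < stackguard0" fails and the goroutine calls morestack; newstack
// then recognizes the sentinel and handles preemption, fork, or a forced
// stack move instead of growing the stack.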

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/fixedStack)
//
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}
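// Editor's note (illustrative, not part of the original source): the
// anonymous padding array in stackpool rounds each element up to a
// multiple of the cache line size so that the per-order mutexes do not
// share a cache line (false sharing). For example, if
// unsafe.Sizeof(stackpoolItem{}) were 24 bytes and cpu.CacheLinePadSize
// were 64 (both sizes are assumptions for the example):
//
//	pad = (64 - 24%64) % 64 = 40 bytes
//
// so item plus padding occupies exactly one cache line. If the item size
// were already a multiple of 64, the outer %64 makes the padding 0
// rather than a whole extra line.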

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - gc.PageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&pageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns the bit length of n (0 for n == 0).
func stacklog2(n uintptr) int {
	if n == 0 {
		return 0
	}
	return bits.Len64(uint64(n))
}
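// Editor's note (illustrative, not part of the original source):
// stacklog2 as written returns the bit length of n, e.g.
//
//	stacklog2(0) = 0, stacklog2(1) = 1, stacklog2(2) = 2, stacklog2(4) = 3
//
// It is only used to pick a bucket in stackLarge.free, and both
// stackalloc and stackfree go through the same function, so the exact
// offset does not matter as long as the mapping is consistent and the
// index stays inside the array bounds.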

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>gc.PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			if valgrindenabled {
				// Register the free-list node (the first pointer-sized
				// word of the stack slot) with Valgrind before writing
				// the next pointer below, so the write is not reported
				// as touching unallocated memory.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
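// Editor's note (illustrative, not part of the original source):
// _StackCacheSize is defined elsewhere in the runtime (malloc.go); on
// most platforms it is 32 KiB, so a refill moves roughly 16 KiB worth of
// stacks of the given order from the global pool into the per-P cache:
// eight 2 KiB stacks for order 0, four 4 KiB stacks for order 1, and so
// on (sizes assume the usual 2 KiB fixedStack). stackcacherelease below
// trims an over-full cache back down to the same 16 KiB watermark, so
// the cache oscillates around half capacity instead of thrashing between
// empty and full.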

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys, "goroutine stack (system)")
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		if valgrindenabled {
			// We're about to hand this stack slot out, so drop the
			// free-list registration created for it above; the whole
			// stack is registered with Valgrind again below.
			valgrindFree(unsafe.Pointer(x.ptr()))
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> gc.PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if valgrindenabled {
		valgrindMalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
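// Editor's note (illustrative, not part of the original source): the
// order loop in stackalloc maps a power-of-two size n to a small-stack
// order by halving until it reaches fixedStack. With fixedStack = 2048:
//
//	n = 2048  -> order 0
//	n = 4096  -> order 1
//	n = 8192  -> order 2
//	n = 16384 -> order 3
//
// Requests at or above fixedStack<<_NumStackOrders (or _StackCacheSize)
// take the large-stack path instead. A standalone sketch of the same
// mapping, assuming n is a power of two no smaller than fixed (this
// helper does not exist in the runtime):
//
//	func stackOrder(n, fixed uint32) uint8 {
//		order := uint8(0)
//		for n > fixed {
//			order++
//			n >>= 1
//		}
//		return order
//	}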

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n)
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackFree(uintptr(v))
			traceRelease(trace)
		}
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if valgrindenabled {
		valgrindFree(v)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			if valgrindenabled {
				// Re-register the first word of the stack as a free-list
				// node; stackpoolfree links it back into the global pool.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			if valgrindenabled {
				// Same as above: the stack slot becomes a free-list node
				// again, this time on the per-P cache list.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+
// |  caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if valgrindenabled {
		// The stack word *vpp may never have been written (for example,
		// a slot that is not yet live), in which case Valgrind would
		// report the range check below as a branch on an uninitialized
		// value. Mark our local copy as defined so the check can
		// proceed; if the value really is junk it simply falls outside
		// [old.lo, old.hi) and is left alone.
		valgrindMakeMemDefined(unsafe.Pointer(&p), unsafe.Sizeof(&p))
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
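// Editor's note (worked example, not part of the original source; the
// addresses are made up): suppose the old stack is [0x1000, 0x3000) and
// the new one is [0x10000, 0x14000), so adjinfo.delta = 0x14000 - 0x3000
// = 0x11000. A stack word holding p = 0x2f00 lies inside the old range
// and is rewritten to 0x2f00 + 0x11000 = 0x13f00; because stacks are
// copied so that the in-use portion ends at hi, the pointee keeps its
// offset from the top of the stack. Values outside [old.lo, old.hi),
// such as heap pointers or scalars that merely look like addresses, are
// left alone. delta is a uintptr, so the addition wraps correctly even
// when the new stack sits at a lower address than the old one.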

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of ptrbits.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
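// Editor's note (illustrative, not part of the original source): bit i
// of the bitvector lives in byte i/8 at bit position i%8, so for example
// ptrbit(11) loads bytedata[1] and extracts bit 3. A set bit means "this
// pointer-sized word of the frame holds a live pointer"; a clear bit
// means the word is a scalar and must not be adjusted. adjustpointers
// below walks the same bytes directly, clearing one set bit at a time
// with b &= b-1 instead of calling ptrbit once per word.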

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS to
	// adjust pointers: such slots lie below adjinfo.sghi, and a
	// concurrent channel operation could be writing to them through an
	// unadjusted sudog at the same time. See syncadjustsudogs and
	// copystack for how sghi is established.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved frame pointer if there is one.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// On AMD64, this is the caller's frame pointer saved in the
		// current frame. On ARM64, this is the frame pointer of the
		// caller's caller, saved by the caller in its own frame.
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base (off < 0)
			if off >= 0 {
				base = frame.argp // args/results base (off >= 0)
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens if SP is adjusted in a frame.)
				continue
			}
			ptrBytes, gcData := obj.gcdata()
			for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize {
				if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
		}
	}
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved one word *below* the SP,
		// which is not copied or adjusted in any frame. Do it explicitly
		// here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of panics list.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vu))
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vp))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := sg.elem.uintptr() + uintptr(sg.c.get().elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c.get() != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// occur because the G involved must already be suspended.
			// So, we get a special hchan lock rank here, that is
			// lower than gscan, but doesn't allow acquiring any other
			// locks other than hchan.
			lockWithRank(&sg.c.get().lock, lockRankHchanLeaf)
		}
		lastc = sg.c.get()
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c.get() != lastc {
			unlock(&sg.c.get().lock)
		}
		lastc = sg.c.get()
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Account for the change in scannable stack size up front; the new
	// size is known here, whereas the allocation below doesn't know how
	// much of it will actually be used.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	if valgrindenabled {
		if gp.valgrindStackID == 0 {
			gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		} else {
			valgrindChangeStack(gp.valgrindStackID, unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		}
	}

	// If this goroutine is handling secret material, erase the old stack
	// before it is returned to the allocator.
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// The secret data could be anywhere in the used portion of the
		// old stack, so clear the entire old stack rather than trying
		// to track where the secrets were.
		memclrNoHeapPointers(unsafe.Pointer(old.lo), old.hi-old.lo)

		// Also scrub any register state that may hold secret material.
		secretEraseRegisters()
	}
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
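// Editor's note (worked example, not part of the original source; the
// numbers are made up and match the adjustpointer example above):
// growing an 8 KiB stack [0x1000, 0x3000) with sp = 0x2800 to a 16 KiB
// stack [0x10000, 0x14000) gives
//
//	used  = old.hi - sp     = 0x3000 - 0x2800  = 0x800
//	delta = new.hi - old.hi = 0x14000 - 0x3000 = 0x11000
//
// so the 0x800 live bytes are copied from [0x2800, 0x3000) to
// [0x13800, 0x14000), the new sp becomes new.hi - used = 0x13800, and
// every stack-internal pointer moves by exactly delta, which is why a
// single additive adjustment suffices for all of the adjust* helpers.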

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
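// Editor's note (illustrative, not part of the original source):
// round2(1) = 1, round2(3000) = 4096, round2(4096) = 4096.
// gcComputeStartingStackSize below relies on this to keep
// startingStackSize a power of two, which stackalloc requires.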

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()

	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// This goroutine is handling secret material. Scrub any register
		// state that may hold secrets before the runtime starts
		// operating on this g.
		secretEraseRegisters()
	}

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		// Mark the goroutine as being at a synchronous safe point.
		gp.syncSafePoint = true

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

// nilfunc is used as the function body for a nil funcval in
// gostartcallfn below; calling it faults with a nil dereference.
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack. The caller must hold the
// _Gscan bit for gp or must be running gp itself.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	if gp.syscallsp != 0 {
		return false
	}

	// We also can't copy the stack if we're at an asynchronous
	// safe point because we don't have precise pointer maps for all frames.
	if gp.asyncSafePoint {
		return false
	}

	// We also can't *shrink* the stack in the window between the goroutine
	// calling gopark to park on a channel and gp.activeStackChans being set.
	if gp.parkingOnChan.Load() {
		return false
	}

	// We also can't copy the stack if the goroutine is in _Gwaiting
	// solely to make itself available for suspension (see
	// waitreason.isWaitingForSuspendG). In that window the goroutine is
	// conceptually still executing, on the system stack, and another
	// party such as the execution tracer may be taking a stack trace of
	// the goroutine's user stack, which would race with moving it.
	// Moving the stack is only safe once the goroutine is genuinely
	// parked.
	if readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForSuspendG() {
		return false
	}
	return true
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}

	// Check for self-shrinks while in a libcall. These may have pointers
	// into the stack disguised as uintptrs, but these code paths should
	// all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2

	// Don't shrink the allocation below the minimum-sized stack allocation.
	if newsize < fixedStack {
		return
	}

	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	ptrBytes  int32
	gcdataoff uint32 // offset to gc data from moduledata.rodata
}

// gcdata returns the number of bytes that contain pointers, and
// a ptr/nonptr bitmask covering those bytes.
// Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes.
func (r *stackObjectRecord) gcdata() (uintptr, *byte) {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return uintptr(r.ptrBytes), (*byte)(unsafe.Pointer(res))
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = fixedStack

func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// Compute the starting stack size for new goroutines from the
	// average stack size scanned during the previous GC cycle: sum the
	// per-P scanned-stack statistics (resetting them for the next
	// cycle), add the stack guard so a goroutine that uses the average
	// amount of stack does not immediately have to grow, clamp the
	// result to [fixedStack, maxstacksize], and round it up to a power
	// of two so it is a valid stack allocation size. If no stacks were
	// scanned, fall back to fixedStack.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for next time.
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = fixedStack
		return
	}
	avg := scannedStackSize/scannedStacks + stackGuard
	// Note: we add stackGuard to ensure that a goroutine that
	// uses the average space will not trigger a growth.
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < fixedStack {
		avg = fixedStack
	}
	// Note: maxstacksize fits in 30 bits, so avg also does.
	startingStackSize = uint32(round2(int32(avg)))
}
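// Editor's note (worked example, not part of the original source; the
// figures are made up): suppose the previous GC cycle scanned 100
// goroutine stacks totalling 600,000 bytes across all Ps. Then
//
//	avg = 600000/100 + stackGuard ≈ 6000 + 928 = 6928
//
// (928 is what stackGuard works out to on a typical linux/amd64 build
// with the default nosplit multiplier; treat the exact figure as an
// assumption). After clamping to [fixedStack, maxstacksize], round2
// takes it to 8192, so new goroutines would start with 8 KiB stacks
// instead of the default 2 KiB, trading some memory for fewer early
// stack growths.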