Source file
src/runtime/stack.go
package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

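// Stack sizing and guard-space parameters.
//
// Goroutine stacks start at stackMin bytes (plus any OS-specific extra in
// stackSystem) and grow by copying. The stack guard (stackGuard) reserves
// enough room below the guard pointer for a chain of NOSPLIT calls
// (stackNosplit), one small frame that skips the overflow check
// (abi.StackSmall), and the OS-specific extra (stackSystem).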
const (
	// stackSystem is a number of additional bytes to add to each stack
	// below the usual guard area, for OS-specific purposes such as signal
	// handling. Used on Windows, Plan 9, and iOS because they do not use a
	// separate signal stack.
	stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	stackMin = 2048

	// The minimum stack size to allocate.
	// The hackery below rounds fixedStack0 = stackMin + stackSystem up to a
	// power of 2.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the bottom of the
	// stack. It leaves enough room for a stackNosplit chain of NOSPLIT
	// calls, one stackSmall frame, and stackSystem bytes for the OS.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

var (
	stackPoisonCopy = 0 // fill stacks with garbage during copy to detect bad dereferences
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force the next
	// stack check to fail. They are all larger than any real SP.

	// Goroutine preemption request. 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging. 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)

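// stackpool is the global pool of spans that have free stacks. Stacks are
// assigned an order according to size: order = log_2(size/fixedStack), with
// one free list per order. Each item must only be accessed with its mutex
// held, and items are cache-line padded to avoid false sharing.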
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}

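// stackLarge is the global pool of large stack spans, bucketed by
// log_2(span size in pages).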
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

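// stackinit initializes the global stack pools and their locks.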
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

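// stacklog2 returns ⌊log_2(n)⌋.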
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

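// stackpoolalloc allocates a stack of the given order from the free pool.
// stackpool[order].item.mu must be held.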
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// No free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// All stacks in s are allocated.
		list.remove(s)
	}
	return x
}

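// stackpoolfree adds stack x of the given order back to the free pool.
// stackpool[order].item.mu must be held.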
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack.
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, delay the free until the end of GC:
		// the garbage collector may still hold pointers into
		// stacks in this span, and freeing (and reusing) the
		// span while GC is marking could make those look like
		// pointers into a free or unrelated span.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

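// stackcacherefill and stackcacherelease implement a per-P cache of free
// stacks in front of the global pool. stackcacherefill grabs half of the
// allowed cache capacity from the global pool.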
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

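// stackcacherelease returns stacks from c's local cache to the global pool
// until the cache is back down to half of its capacity.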
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

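// stackcache_clear returns all stacks in c's local cache to the global pool.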
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

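// stackalloc allocates an n byte stack. Small stacks come from the per-P
// cache or the fixed-size pools; large stacks get a dedicated span. It must
// run on the scheduler (g0) stack.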
func stackalloc(n uint32) stack {
	// stackalloc must be called on the scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock.
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch the stack cache during GC, as it is
			// flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

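// stackfree frees an n byte stack allocated by stackalloc, returning it to
// the per-P cache, the global pools, or the heap as appropriate. Like
// stackalloc, it must run on the system stack.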
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackFree(uintptr(v))
			traceRelease(trace)
		}
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a stack span
			// to the heap because it could be reused as a heap
			// span, and this state change would race with GC.
			// Add it to the large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

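// Stack frame copying and pointer adjustment.
//
// When a stack is copied to a new location, every pointer that points into
// the old stack (from frames, defers, panics, channel sudogs, and saved
// frame pointers) must be adjusted by the distance the stack moved.
// adjustinfo carries the old stack bounds and that delta.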
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

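// adjustpointer checks whether *vpp is in the old stack described by adjinfo
// and, if so, adjusts it by adjinfo.delta.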
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

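// bitvector records, one bit per pointer-sized word, which words of a region
// hold live pointers. The compiler produces these for stack frames.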
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

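// ptrbit returns the i'th bit in bv.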
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

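// adjustpointers walks the memory at scanp using the pointer bitmap bv and
// adjusts any pointers that point into the old stack.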
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS to
	// adjust pointers so we don't clobber concurrent channel sends
	// writing to those slots.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

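// adjustframe adjusts pointers in a single stack frame: the saved frame
// pointer, the frame's locals, its incoming arguments, and any stack objects.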
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved frame pointer if there is one.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack is growing to allocate it.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// The object's pointer mask is encoded as a GC
				// program; expand it into a temporary bitmap.
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}
}

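// adjustctxt adjusts the saved context pointer and, when frame pointers are
// enabled, the saved frame pointer in gp's scheduling state.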
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved in the word below the
		// caller's SP. gp.sched.bp was adjusted above to point into the
		// new stack, but that saved word lies below sched.sp, outside the
		// region copied by copystack, so copy and adjust it here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}

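// adjustdefers adjusts the deferred-call records hanging off gp.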
func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs. gp._defer is adjusted first,
	// and each link is adjusted before it is followed, so the walk stays
	// on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
}

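// adjustpanics adjusts the head of gp's panic list. Panic records themselves
// live on the stack and are adjusted as part of the frames containing them.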
func adjustpanics(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

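// adjustsudogs adjusts the elem pointers of the sudogs on gp's waiting list,
// since the data they point to may be on the stack.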
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

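// fillstack fills the stack stk with byte b, for poisoning in debug modes.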
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

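// findsghi returns the highest address on stk that is the end of a channel
// element (sudog.elem + elemsize) referred to by gp's waiting sudogs, or 0 if
// there is none.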
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

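// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's stack they
// refer to while synchronizing with concurrent channel operations. It returns
// the number of bytes of stack copied.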
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive operations.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// Acquiring hchan locks while holding the gscan bit
			// inverts the usual lock order, but it cannot deadlock
			// here because the goroutine being adjusted is already
			// suspended; lockRankHchanLeaf exists for this special
			// case.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

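// copystack copies gp's stack to a new stack of size newsize, adjusts every
// pointer into the old stack, and frees the old stack. The caller must have
// changed gp's status to _Gcopystack.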
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Account for the change in scannable stack size.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're
			// actively parking on a channel, but it is safe to grow since
			// we do that ourselves and explicitly don't want to
			// synchronize with channels since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

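// round2 rounds x up to a power of 2.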
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

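// newstack is called from runtime·morestack when more stack is needed.
// It allocates a larger stack and relocates the goroutine to it, or handles a
// pending preemption request that was delivered by poisoning stackguard0.
// Stack growth is multiplicative, for constant amortized cost.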
func newstack() {
	thisg := getg()

	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt. Even the status change from Grunning to Gwaiting and back
	// could turn into a real deadlock if the GC is in some way dependent
	// on this goroutine, so this check happens very early in newstack.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// A stack shrink was requested at the last preemption
			// point; do it now.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy
	// since the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

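// nilfunc intentionally faults; it is used as the function value for a nil
// function call.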
func nilfunc() {
	*(*uint8)(nil) = 0
}

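// gostartcallfn adjusts gobuf as if it had executed a call to fv (or to a
// function that faults, if fv is nil) and then stopped before the first
// instruction of that function.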
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

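// isShrinkStackSafe reports whether it is safe to attempt to shrink gp's
// stack. Shrinking is only safe when there are precise pointer maps for all
// frames on the stack.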
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	if gp.syscallsp != 0 {
		return false
	}
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	if gp.asyncSafePoint {
		return false
	}
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	if gp.parkingOnChan.Load() {
		return false
	}
	// While tracing is enabled and gp is in _Gwaiting solely to make
	// itself available to the GC, gp may actually be executing on the
	// system stack and the execution tracer may want to take a stack
	// trace of gp's stack, so don't move it.
	if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForGC() {
		return false
	}
	return true
}

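// shrinkstack maybe shrinks the stack being used by gp. gp must be stopped
// and the caller must own its stack. gp may be stopped in _Grunning, but only
// when it is the caller's own user goroutine.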
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack, but the compiler cannot see them.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == abi.FuncID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}

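// freeStackSpans frees unused stack spans at the end of GC.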
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

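// A stackObjectRecord is generated by the compiler for each stack object in a
// stack frame. It records where the object lives in the frame and where its
// pointer bitmap can be found.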
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, or -ptrdata if a GC program is used
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}

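// useGCProg reports whether the object's pointer bitmap is encoded as a GC
// program rather than a plain bitmap.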
func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}

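// gcdata returns the pointer map data for this stack object.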
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}

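// morestackc reports a fatal error: code that must run on the system stack
// was entered on a user goroutine stack.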
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

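// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2 between fixedStack and maxstacksize, inclusive, and is
// recomputed after each GC from the average size of scanned stacks (see
// gcComputeStartingStackSize).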
var startingStackSize uint32 = fixedStack

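// gcComputeStartingStackSize recomputes startingStackSize after a GC cycle
// when the adaptivestackstart debug setting is enabled, using the average
// size of the goroutine stacks scanned during that cycle.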
func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// Compute the average size of the goroutine stacks scanned during the
	// last GC cycle and use it, rounded up to a power of 2 and clamped to
	// [fixedStack, maxstacksize], as the starting size for new goroutines.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for next time.
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = fixedStack
		return
	}
	avg := scannedStackSize/scannedStacks + stackGuard
	// Note: we add stackGuard to ensure that a goroutine that
	// uses the average space will not trigger a growth.
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < fixedStack {
		avg = fixedStack
	}
	// Note: maxstacksize fits in 30 bits, so avg also does.
	startingStackSize = uint32(round2(int32(avg)))
}