src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

/*
Stack layout parameters.

The per-goroutine g->stackguard is set to point stackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude stackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack.
*/
const (
	// stackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	stackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds fixedStack0 up to a power of 2.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackNosplit.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for a stackNosplit chain of NOSPLIT calls
	// plus one stackSmall frame plus stackSystem bytes for the OS.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)
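
// A worked example of the rounding above (illustrative numbers, not a
// platform guarantee): on windows, stackSystem is 4096, so fixedStack0
// is 6144 (0x1800). fixedStack1 = 0x17ff, and the shift-or cascade
// smears the leading one bit into every lower position, giving
// fixedStack6 = 0x1fff; adding 1 yields fixedStack = 0x2000 = 8192,
// the next power of two. Where stackSystem is 0, fixedStack0 = 2048 is
// already a power of two and the cascade leaves it unchanged.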

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

var (
	stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)
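
// For example, on a 64-bit platform stackPreempt works out to
// 0xfffffffffffffade. Because this value is far above any real stack
// address, writing it to g.stackguard0 makes the next prologue stack
// check fail unconditionally, diverting the goroutine into
// morestack -> newstack, which then notices the sentinel and preempts
// instead of growing the stack.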

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/fixedStack)
//
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}
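
// For example, with fixedStack = 2048, stackpool[0] holds spans carved
// into 2 KiB stacks, stackpool[1] into 4 KiB stacks, and so on; the
// anonymous padding field keeps neighboring entries on separate cache
// lines so their mutexes don't false-share.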

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - gc.PageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&pageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋, which is exact when n is a power of
// two, as all stack sizes here are.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
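
// For example, stacklog2(1) = 0, stacklog2(4) = 2, and stacklog2(8) = 3.
// Large stacks use this to index stackLarge.free by stacklog2(s.npages).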

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>gc.PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			if valgrindenabled {
				// The first word of each free stack is about to be
				// used as a free-list link; register it with Valgrind
				// so the write below is not flagged.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
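
// The free list here is intrusive: the first word of each free stack
// stores the gclinkptr to the next free stack in the same span, so no
// side table is needed. A fresh span is carved up front into
// _StackCacheSize/elemsize free stacks by the loop above.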

// Adds stack x to the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool
// of stack segments. The pool is required to prevent unlimited growth
// of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
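
// For example, with fixedStack = 2048 and _StackCacheSize = 32 KiB (its
// value in malloc.go), an order-0 refill keeps pulling 2 KiB stacks
// until size reaches 16 KiB, i.e. eight stacks move from the global
// pool to this P's cache in one locked batch.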

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys, "goroutine stack (system)")
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		if valgrindenabled {
			// The free-list link word in this stack is no longer a
			// separate allocation; release it before registering the
			// whole region with Valgrind below.
			valgrindFree(unsafe.Pointer(x.ptr()))
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> gc.PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if valgrindenabled {
		valgrindMalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
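
// For example, a request for n = 8192 with fixedStack = 2048 computes
// order 2 (n2 halves 8192 -> 4096 -> 2048 before the loop exits), so
// the stack comes from this P's order-2 cache or stackpool[2]; a
// request at or above fixedStack<<_NumStackOrders instead takes the
// dedicated-span path through stackLarge and the heap.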

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackFree(uintptr(v))
			traceRelease(trace)
		}
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if valgrindenabled {
		valgrindFree(v)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			if valgrindenabled {
				// The first word of the freed stack becomes a
				// free-list link again; re-register it with Valgrind.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			if valgrindenabled {
				// As above: the link word written below is a live
				// allocation as far as Valgrind is concerned.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a stack span to
			// the heap safely (see the comment in stackpoolfree), so
			// park it in the large stack cache until GC is done.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}
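
// Note the symmetry with stackalloc: small stacks return to the P's
// cache, spilling half back to the global pool once the cache exceeds
// _StackCacheSize, while large stacks go straight back to the heap, or
// to stackLarge if a GC is in progress.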

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+
// |  caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
//
// varp > sp means that the function has a frame;
// varp == sp means frameless function.

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by
// adjinfo. If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if valgrindenabled {
		// p may have been loaded from stack memory that Valgrind
		// considers undefined; we compare it against the old stack
		// bounds regardless, so mark the local copy as defined to
		// keep Valgrind from reporting the reads below. This does not
		// change what the goroutine itself may access.
		valgrindMakeMemDefined(unsafe.Pointer(&p), unsafe.Sizeof(&p))
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
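
// For example, if the old stack spans [0xc000100000, 0xc000102000) and
// the new one spans [0xc000200000, 0xc000202000), delta is 0x100000; a
// slot holding 0xc000101010 becomes 0xc000201010, while any value
// outside the old bounds (a heap pointer, an integer) is left alone.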

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this region might contain channel receive slots (it lies below
	// sghi), use CAS to avoid racing with channel operations writing to
	// those slots (see syncadjustsudogs); above sghi no concurrent
	// writes are possible and a plain store suffices.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
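
// The bitmap walk above touches one byte of bv.bytedata per eight
// slots and uses TrailingZeros8 to jump straight to each set bit;
// b &= b - 1 clears the lowest set bit, so the inner loop iterates
// once per pointer slot rather than once per word.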

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved frame pointer if there is one.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// On AMD64, this is the caller's frame pointer saved in the
		// current frame.
		// On ARM64, this is the frame pointer of the caller's caller
		// saved by the caller in its frame (one word below its SP).
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrBytes, gcData := obj.gcdata()
			for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize {
				if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
		}
	}
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved one word below the SP,
		// which is not copied or adjusted in any frame. Do it explicitly
		// here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// occur here if there is only one G being copied.
			// A deadlock can occur when channel locks for multiple
			// Gs are acquired concurrently. This is extremely rare
			// and only happens during copystack.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
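
// gp.waiting is built in channel lock order (sorted by channel
// address, see selectgo), so duplicates are adjacent and the lastc
// check above locks each distinct channel exactly once; with every
// involved channel locked, no send or receive can write into the stack
// region while it is being moved.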

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Add just the difference to gcController.addScannableStack.
	// g0 stacks never move, so this will never account for them.
	// It's also fine if we have no P, addScannableStack can deal with
	// that case.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	if valgrindenabled {
		if gp.valgrindStackID == 0 {
			gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		} else {
			valgrindChangeStack(gp.valgrindStackID, unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		}
	}

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
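
// For example, growing a 2 KiB stack [lo, hi) into a 4 KiB allocation
// [lo', hi') sets delta = hi' - hi, copies the used bytes so they still
// end at the high address, and then rewrites every pointer into the old
// stack (frames, defers, panics, sudogs, sched.ctxt) by adding delta.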

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
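
// For example, round2(3000) returns 4096, and round2(4096) returns
// 4096 unchanged.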

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// (A transient status change would be visible to concurrent
	// observers of the goroutine, such as the garbage collector.)
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		// Mark that gp is at a synchronous safe point.
		gp.syncSafePoint = true

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
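
// For example, a goroutine that overflows its initial 2 KiB stack gets
// a 4 KiB one; further overflows double again (8 KiB, 16 KiB, ...)
// until the frame fits, so reaching a stack of n bytes costs O(n)
// total copying. runtime.main later raises maxstacksize to its real
// limit (1 GB on 64-bit systems), beyond which growth throws
// "stack overflow".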

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack. The caller must hold the
// _Gscan bit for gp or must be running gp itself.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	if gp.syscallsp != 0 {
		return false
	}

	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	if gp.asyncSafePoint {
		return false
	}

	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	if gp.parkingOnChan.Load() {
		return false
	}

	// We also can't copy the stack while tracing is enabled, and
	// gp is in _Gwaiting solely to make itself available to suspendG.
	// In these cases, the G is actually executing on the system
	// stack, and the execution tracer may want to take a stack trace
	// of the G's stack.
	if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForSuspendG() {
		return false
	}
	return true
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == abi.FuncID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
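
// For example, a goroutine holding an 8 KiB stack shrinks to 4 KiB only
// if its live region (SP to stack.hi, plus stackNosplit headroom) is
// under 2 KiB; repeated shrinks can halve the stack again, but never
// below fixedStack.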

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	ptrBytes  int32
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}

// gcdata returns the number of bytes that contain pointers, and
// a ptr/nonptr bitmask covering those bytes.
// Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes.
func (r *stackObjectRecord) gcdata() (uintptr, *byte) {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return uintptr(r.ptrBytes), (*byte)(unsafe.Pointer(res))
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = fixedStack

func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// The goal is to start new goroutine stacks at a size close to what
	// goroutines actually need: sum the size and count of goroutine
	// stacks scanned during the previous GC cycle, take the average,
	// and round up to a power of two. Pathological averages are clamped
	// to [fixedStack, maxstacksize] below.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for next time
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = fixedStack
		return
	}
	// Add stackGuard so that a goroutine using the average amount of
	// stack does not immediately trigger a growth.
	avg := scannedStackSize/scannedStacks + stackGuard
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < fixedStack {
		avg = fixedStack
	}
	// Note: maxstacksize fits in 30 bits, so avg also does.
	startingStackSize = uint32(round2(int32(avg)))
}
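
// For example, if the previous cycle scanned 1000 goroutine stacks
// using 3 MB in total, the average is about 3 KiB; adding stackGuard
// and rounding up with round2 starts new goroutines with 4 KiB stacks
// instead of the default 2 KiB.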