Source file src/runtime/panic.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
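
// As an illustrative sketch (not literal compiler output), a function such as
//
//	func f() {
//		defer g()
//		if cond {
//			defer h()
//		}
//		// ...
//	}
//
// can use open-coded defers: g and h go into fixed stack slots, and a
// per-frame bitmask records which slots are pending at each exit. By
// contrast, a function like
//
//	func f2() {
//		for i := 0; i < n; i++ {
//			defer g(i) // defer in a loop
//		}
//	}
//
// cannot, because the number of pending defers is unknown at compile time,
// so each iteration must allocate a defer record via deferproc instead.
// (f, f2, g, h, cond, and n above are hypothetical.)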

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.
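
// For example (an illustrative sketch, not literal compiler output), an
// index expression v := s[i] is lowered, in effect, to:
//
//	if uint(i) >= uint(len(s)) {
//		panicIndex(i, len(s)) // tail calls goPanicIndex below
//	}
//	v := s[i] // now proven in bounds
//
// where s, i, and v are hypothetical.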

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(sys.GetCallerPC(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
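
// As a rough sketch (illustrative, not literal compiler output), a statement
// "defer f()" in a function that cannot use open-coded defers behaves as if
// compiled to:
//
//	deferproc(f) // pushes a record; "returns" 0 via return0
//	// ... rest of the function ...
//	deferreturn() // at each exit, runs this frame's pending defers
//	return
//
// plus a hidden check after the deferproc call: when a recovered panic
// re-enters the frame (see recovery below), deferproc appears to return 1,
// and the generated code jumps straight to the function's return epilogue.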

var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but an atomic list hanging off:
//
//	g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                        | .head
//	                        |
//	                        +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//	g._defer => drangefunc -> d2 -> d1 -> nil
//	            | .head
//	            |
//	            +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}
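
// For example (an illustrative sketch of the rewriting described above),
// given a range-over-function loop whose body defers, where seq and f are
// hypothetical:
//
//	for x := range seq {
//		defer f(x)
//	}
//
// the compiler arranges, roughly, for the enclosing function to call
// deferrangefunc once to obtain a token, and for the synthesized loop-body
// closure to call deferprocat(func() { f(x) }, token), so the deferred call
// attaches to the enclosing function's frame rather than to the short-lived
// loop-body closure.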

// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}

	// Must be last - see deferproc above.
	return0()
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
	return
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = sys.GetCallerSP()
	d.pc = sys.GetCallerPC()
	// The lines below implement:
	//   d.panic = nil
	//   d.fd = nil
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with popDefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}
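
// As a usage sketch (illustrative; these are runtime internals, not a
// public API):
//
//	d := newdefer()
//	d.link = gp._defer
//	gp._defer = d
//	// ... later, once the deferred call has been taken off the list ...
//	popDefer(gp) // unlinks the head record and returns it to the pool
//
// Records flow between pp.deferpool (per-P, accessed without a lock) and
// sched.deferpool (global, guarded by sched.deferlock) in batches of half
// the local capacity, so the steady state avoids both the allocator and
// the lock.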

// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
//
// It crashes if called from a thread not created by the Go runtime.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}
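
// For example (illustrative): deferred calls still run during Goexit, but
// recover returns nil because no panic is in progress:
//
//	go func() {
//		defer func() {
//			fmt.Println("recover:", recover()) // prints "recover: <nil>"
//		}()
//		runtime.Goexit() // runs the defer, then terminates this goroutine
//	}()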

// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printpanicval(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}
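
// For example (illustrative): the value 300 is encoded 7 bits at a time,
// least-significant group first, with the high bit of each byte as a
// continuation flag:
//
//	0xAC 0x02 // 0xAC = 0x2C | 0x80 (continue); 0x02 ends the varint
//
// The loop above accumulates 0x2C<<0 = 44 from the first byte; the second
// byte is < 128, so it returns 44 + 0x02<<7 = 300.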

// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}

var panicnil = &godebugInc{name: "panicnil"}
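
// For example (illustrative):
//
//	defer func() {
//		_, ok := recover().(*runtime.PanicNilError)
//		fmt.Println(ok) // true by default; false under GODEBUG=panicnil=1,
//		                // where recover returns nil as before Go 1.21
//	}()
//	panic(nil)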

// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - go.undefinedlabs.com/scopeagent
//   - github.com/goplus/igop
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printpanicval(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e

	runningPanicDefers.Add(1)

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = sys.GetCallerPC()
	p.startSP = unsafe.Pointer(sys.GetCallerSP())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	// The assembler adjusts p.argp in wrapper functions that shouldn't
	// be visible to recover(), so we need to restore it each iteration.
	p.argp = add(p.startSP, sys.MinFrameSize)

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			// TODO(mdempsky): Instead of having each deferproc call have
			// its own "deferreturn(); return" sequence, we should just make
			// them reuse the one we emit for open-coded defers.
			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}
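
// For example (illustrative): if *p.deferBitsPtr is 0b00000101, open-coded
// defers are pending in slots 0 and 2. The scan above picks the top set bit
// first (i == 2), clears it (leaving 0b00000001), and returns the func()
// stored at slotsPtr + 2*PtrSize, so pending defers run in the reverse of
// the order in which their defer statements executed.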

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
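
// For example (illustrative): recover stops a panic only when called
// directly by a deferred function, since only then does the argp passed by
// the compiler match p.argp:
//
//	defer func() {
//		if r := recover(); r != nil {
//			fmt.Println("recovered:", r) // runs; the panic is stopped
//		}
//	}()
//	panic("boom")
//
// By contrast, "defer recover()" or calling recover inside a helper invoked
// by the deferred function returns nil and does not stop the panic.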

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

//go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string) {
	fatal(s)
}

//go:linkname fips_fatal crypto/internal/fips.fatal
func fips_fatal(s string) {
	fatal(s)
}

//go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string) {
	fatal(s)
}

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// NOTE: temporarily marked "go:noinline" pending investigation/fix of
// issue #67274, so as to fix longtest builders.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cockroachdb/pebble
//   - github.com/dgraph-io/ristretto
//   - github.com/outcaste-io/ristretto
//   - github.com/pingcap/br
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeRuntime)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	printlock() // Prevent multiple interleaved fatal reports. See issue 69447.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeUser)
	printunlock()
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			pc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}