// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if sys.GoarchWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.
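// For illustration: an out-of-range index in user code such as
//
//	s := make([]int, 3)
//	i := 5
//	_ = s[i]
//
// fails its bounds check, reaches goPanicIndex below via the assembly
// panicIndex shim, and prints
//
//	panic: runtime error: index out of range [5] with length 3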
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)

var shiftError = error(errorString("negative shift amount"))

func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}
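// Illustrative note: on most architectures an integer divide by zero is
// caught as a hardware trap rather than an explicit check, so
// panicdivide above is typically reached via runtime.sigpanic. Either
// way, code like
//
//	func div(a, b int) int { return a / b }
//	// div(1, 0) => panic: runtime error: integer divide by zero
//
// ends up panicking with divideError.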
error(errorString("floating point error"))func () {panicCheck2("floating point error")panic(floatError)}varmemoryError = error(errorString("invalid memory address or nil pointer dereference"))func () {panicCheck2("invalid memory address or nil pointer dereference")panic(memoryError)}func ( uintptr) {panicCheck2("invalid memory address or nil pointer dereference")panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: })}// Create a new deferred function fn with siz bytes of arguments.// The compiler turns a defer statement into a call to this.//go:nosplitfunc ( int32, *funcval) { // arguments of fn follow fn := getg()if .m.curg != {// go code on the system stack can't deferthrow("defer on system stack") }// the arguments of fn are in a perilous state. The stack map // for deferproc does not describe them. So we can't let garbage // collection or stack copying trigger until we've copied them out // to somewhere safe. The memmove below does that. // Until the copy completes, we can only call nosplit routines. := getcallersp() := uintptr(unsafe.Pointer(&)) + unsafe.Sizeof() := getcallerpc() := newdefer()if ._panic != nil {throw("deferproc: d.panic != nil after newdefer") } .link = ._defer ._defer = .fn = .pc = .sp = switch {case0:// Do nothing.casesys.PtrSize: *(*uintptr)(deferArgs()) = *(*uintptr)(unsafe.Pointer())default:memmove(deferArgs(), unsafe.Pointer(), uintptr()) }// deferproc returns 0 normally. // a deferred func that stops a panic // makes the deferproc return 1. // the code the compiler generates always // checks the return value and jumps to the // end of the function if deferproc returns != 0.return0()// No code can go here - the C return register has // been set and must not be clobbered.}// deferprocStack queues a new deferred function with a defer record on the stack.// The defer record must have its siz and fn fields initialized.// All other fields can contain junk.// The defer record must be immediately followed in memory by// the arguments of the defer.// Nosplit because the arguments on the stack won't be scanned// until the defer record is spliced into the gp._defer list.//go:nosplitfunc ( *_defer) { := getg()if .m.curg != {// go code on the system stack can't deferthrow("defer on system stack") }// siz and fn are already set. // The other fields are junk on entry to deferprocStack and // are initialized here. .started = false .heap = false .openDefer = false .sp = getcallersp() .pc = getcallerpc() .framepc = 0 .varp = 0// The lines below implement: // d.panic = nil // d.fd = nil // d.link = gp._defer // gp._defer = d // But without write barriers. The first three are writes to // the stack so they don't need a write barrier, and furthermore // are to uninitialized memory, so they must not use a write barrier. // The fourth write does not require a write barrier because we // explicitly mark all the defer structures, so we don't need to // keep track of pointers to them with a write barrier. 
*(*uintptr)(unsafe.Pointer(&._panic)) = 0 *(*uintptr)(unsafe.Pointer(&.fd)) = 0 *(*uintptr)(unsafe.Pointer(&.link)) = uintptr(unsafe.Pointer(._defer)) *(*uintptr)(unsafe.Pointer(&._defer)) = uintptr(unsafe.Pointer())return0()// No code can go here - the C return register has // been set and must not be clobbered.}// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...// Each P holds a pool for defers with small arg sizes.// Assign defer allocations to pools by rounding to 16, to match malloc size classes.const (deferHeaderSize = unsafe.Sizeof(_defer{})minDeferAlloc = (deferHeaderSize + 15) &^ 15minDeferArgs = minDeferAlloc - deferHeaderSize)// defer size class for arg size sz//go:nosplitfunc ( uintptr) uintptr {if <= minDeferArgs {return0 }return ( - minDeferArgs + 15) / 16}// total size of memory block for defer with arg size szfunc ( uintptr) uintptr {if <= minDeferArgs {returnminDeferAlloc }returndeferHeaderSize + }// Ensure that defer arg sizes that map to the same defer size class// also map to the same malloc size class.func () {var [len(p{}.deferpool)]int32for := range { [] = -1 }for := uintptr(0); ; ++ { := deferclass()if >= uintptr(len()) {break } := roundupsize(totaldefersize())if [] < 0 { [] = int32()continue }if [] != int32() {print("bad defer size class: i=", , " siz=", , " defersc=", , "\n")throw("bad defer size class") } }}// The arguments associated with a deferred call are stored// immediately after the _defer header in memory.//go:nosplitfunc ( *_defer) unsafe.Pointer {if .siz == 0 {// Avoid pointer past the defer allocation.returnnil }returnadd(unsafe.Pointer(), unsafe.Sizeof(*))}vardeferType *_type// type of _defer structfunc () {varinterface{} = (*_defer)(nil)deferType = (*(**ptrtype)(unsafe.Pointer(&))).elem}// Allocate a Defer, usually using per-P pool.// Each defer must be released with freedefer. The defer is not// added to any defer chain yet.//// This must not grow the stack because there may be a frame without// stack map information when this is called.////go:nosplitfunc ( int32) *_defer {var *_defer := deferclass(uintptr()) := getg()if < uintptr(len(p{}.deferpool)) { := .m.p.ptr()iflen(.deferpool[]) == 0 && sched.deferpool[] != nil {// Take the slow path on the system stack so // we don't grow newdefer's stack.systemstack(func() {lock(&sched.deferlock)forlen(.deferpool[]) < cap(.deferpool[])/2 && sched.deferpool[] != nil { := sched.deferpool[]sched.deferpool[] = .link .link = nil .deferpool[] = append(.deferpool[], ) }unlock(&sched.deferlock) }) }if := len(.deferpool[]); > 0 { = .deferpool[][-1] .deferpool[][-1] = nil .deferpool[] = .deferpool[][:-1] } }if == nil {// Allocate new defer+args.systemstack(func() { := roundupsize(totaldefersize(uintptr())) = (*_defer)(mallocgc(, deferType, true)) }) } .siz = .heap = truereturn}// Free the given defer.// The defer cannot be used after this call.//// This must not grow the stack because there may be a frame without a// stack map when this is called.////go:nosplitfunc ( *_defer) {if ._panic != nil {freedeferpanic() }if .fn != nil {freedeferfn() }if !.heap {return } := deferclass(uintptr(.siz))if >= uintptr(len(p{}.deferpool)) {return } := getg().m.p.ptr()iflen(.deferpool[]) == cap(.deferpool[]) {// Transfer half of local cache to the central cache. 
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		return
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.openDefer = false
	d.sp = 0
	d.pc = 0
	d.framepc = 0
	d.varp = 0
	d.fd = nil
	// d._panic and d.fn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
//
// Declared as nosplit, because the function should not be preempted once we start
// modifying the caller's frame in order to reuse the frame to call the deferred
// function.
//
// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		return
	}
	if d.openDefer {
		done := runOpenDeferFrame(gp, d)
		if !done {
			throw("unfinished open-coded defers in deferreturn")
		}
		gp._defer = d.link
		freedefer(d)
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	// If the defer function pointer is nil, force the seg fault to happen
	// here rather than in jmpdefer. gentraceback() throws an error if it is
	// called with a callback on an LR architecture and jmpdefer is on the
	// stack, because the stack trace can be incorrect in that case - see
	// issue #8153).
	_ = fn.fn
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
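// Purely illustrative: because jmpdefer re-enters deferreturn once per
// record, the deferred calls in a frame run last-in-first-out:
//
//	func f() {
//		defer print(1)
//		defer print(2)
//	}
//	// prints 21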
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()

	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			if !d.openDefer {
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}
		d.started = true
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				// We should always run all defers in the frame,
				// since there is no panic associated with this
				// defer that can be recovered.
				throw("unfinished open-coded defers in Goexit")
			}
			if p.aborted {
				// Since our current defer caused a panic and may
				// have been already freed, just restart scanning
				// for open-coded defers from this frame again.
				addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
			} else {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			// Save the pc/sp in reflectcallSave(), so we can "recover" back to this
			// loop if necessary.
			reflectcallSave(&p, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz))
		}
		if p.aborted {
			// We had a recursive panic in the defer d we started, and
			// then did a recover in a defer that was further down the
			// defer chain than d. In the case of an outstanding Goexit,
			// we force the recover to return back to this loop. d will
			// have already been freed if completed, so just continue
			// immediately to the next defer on the chain.
			p.aborted = false
			continue
		}
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}

// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	defer func() {
		if recover() != nil {
			throw("panic while printing panic value")
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
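// For example, when a deferred function recovers one panic and then a
// new panic is raised, printpanics produces output shaped like:
//
//	panic: first [recovered]
//		panic: second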
// addOneOpenDeferFrame scans the stack for the first frame (if any) with
// open-coded defers and if it finds one, adds a single record to the defer chain
// for that frame. If sp is non-nil, it starts the stack scan from the frame
// specified by sp. If sp is nil, it uses the sp from the current defer record
// (which has just been finished). Hence, it continues the stack scan from the
// frame of the defer that just finished. It skips any frame that already has an
// open-coded _defer record, which would have been created from a previous
// (unrecovered) panic.
//
// Note: All entries of the defer chain (including this new open-coded entry) have
// their pointers (including sp) adjusted properly if the stack moves while
// running deferred functions. Also, it is safe to pass in the sp arg (which is
// the direct result of calling getcallersp()), because all pointer variables
// (including arguments) are adjusted as needed during stack copies.
func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
	var prevDefer *_defer
	if sp == nil {
		prevDefer = gp._defer
		pc = prevDefer.framepc
		sp = unsafe.Pointer(prevDefer.sp)
	}
	systemstack(func() {
		gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
			func(frame *stkframe, unused unsafe.Pointer) bool {
				if prevDefer != nil && prevDefer.sp == frame.sp {
					// Skip the frame for the previous defer that
					// we just finished (and was used to set
					// where we restarted the stack scan)
					return true
				}
				f := frame.fn
				fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
				if fd == nil {
					return true
				}
				// Insert the open defer record in the
				// chain, in order sorted by sp.
				d := gp._defer
				var prev *_defer
				for d != nil {
					dsp := d.sp
					if frame.sp < dsp {
						break
					}
					if frame.sp == dsp {
						if !d.openDefer {
							throw("duplicated defer entry")
						}
						return true
					}
					prev = d
					d = d.link
				}
				if frame.fn.deferreturn == 0 {
					throw("missing deferreturn")
				}

				maxargsize, _ := readvarintUnsafe(fd)
				d1 := newdefer(int32(maxargsize))
				d1.openDefer = true
				d1._panic = nil
				// These are the pc/sp to set after we've
				// run a defer in this frame that did a
				// recover. We return to a special
				// deferreturn that runs any remaining
				// defers and then returns from the
				// function.
				d1.pc = frame.fn.entry + uintptr(frame.fn.deferreturn)
				d1.varp = frame.varp
				d1.fd = fd
				// Save the SP/PC associated with current frame,
				// so we can continue stack trace later if needed.
				d1.framepc = frame.pc
				d1.sp = frame.sp
				d1.link = d
				if prev == nil {
					gp._defer = d1
				} else {
					prev.link = d1
				}
				// Stop stack scanning after adding one open defer record
				return false
			},
			nil, 0)
	})
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// There is a similar function runtime.readvarint, which takes a slice of bytes,
// rather than an unsafe pointer. These functions are duplicated, because one of
// the two use cases for the functions would get slower if the functions were
// combined.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)((unsafe.Pointer(fd)))
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += ((uint32(b) &^ 128) << shift)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}
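// A worked example of the varint decoding above: for the input bytes
// 0x96 0x01, the first byte is >= 128, so r = (0x96 &^ 0x80) << 0 = 22
// and shift becomes 7; the second byte is < 128, so the result is
// 22 + 1<<7 = 150.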
// runOpenDeferFrame runs the active open-coded defers in the frame specified by
// d. It normally processes all active defers in the frame, but stops immediately
// if a defer does a successful recover. It returns true if there are no
// remaining defers to run in the frame.
func runOpenDeferFrame(gp *g, d *_defer) bool {
	done := true
	fd := d.fd

	// Skip the maxargsize
	_, fd = readvarintUnsafe(fd)
	deferBitsOffset, fd := readvarintUnsafe(fd)
	nDefers, fd := readvarintUnsafe(fd)
	deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))

	for i := int(nDefers) - 1; i >= 0; i-- {
		// read the funcdata info for this defer
		var argWidth, closureOffset, nArgs uint32
		argWidth, fd = readvarintUnsafe(fd)
		closureOffset, fd = readvarintUnsafe(fd)
		nArgs, fd = readvarintUnsafe(fd)
		if deferBits&(1<<i) == 0 {
			for j := uint32(0); j < nArgs; j++ {
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
			}
			continue
		}
		closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset)))
		d.fn = closure
		deferArgs := deferArgs(d)
		// If there is an interface receiver or method receiver, it is
		// described/included as the first arg.
		for j := uint32(0); j < nArgs; j++ {
			var argOffset, argLen, argCallOffset uint32
			argOffset, fd = readvarintUnsafe(fd)
			argLen, fd = readvarintUnsafe(fd)
			argCallOffset, fd = readvarintUnsafe(fd)
			memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)),
				unsafe.Pointer(d.varp-uintptr(argOffset)),
				uintptr(argLen))
		}
		deferBits = deferBits &^ (1 << i)
		*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
		p := d._panic
		reflectcallSave(p, unsafe.Pointer(closure), deferArgs, argWidth)
		if p != nil && p.aborted {
			break
		}
		d.fn = nil
		// These args are just a copy, so can be cleared immediately
		memclrNoHeapPointers(deferArgs, uintptr(argWidth))
		if d._panic != nil && d._panic.recovered {
			done = deferBits == 0
			break
		}
	}

	return done
}

// reflectcallSave calls reflectcall after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer processing
// loop, in the unusual case where the Goexit may be bypassed by a successful
// recover.
func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
	if p != nil {
		p.argp = unsafe.Pointer(getargp(0))
		p.pc = getcallerpc()
		p.sp = unsafe.Pointer(getcallersp())
	}
	reflectcall(nil, fn, arg, argsize, argsize)
	if p != nil {
		p.pc = 0
		p.sp = unsafe.Pointer(nil)
	}
}
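// In outline (a sketch reconstructed from the reads in
// runOpenDeferFrame above, not an authoritative layout), the
// _FUNCDATA_OpenCodedDeferInfo blob is a sequence of varints:
//
//	maxargsize
//	offset of the deferBits slot
//	number of defer sites
//	per defer site: arg size, closure slot offset, arg count
//	per arg: frame offset, size, offset within the call's args area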
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	// By calculating getcallerpc/getcallersp here, we avoid scanning the
	// gopanic frame (stack scanning is slow...)
	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. An earlier panic will not continue running, but we will make sure below that an
		// earlier Goexit does continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			if !d.openDefer {
				// For open-coded defers, we need to process the
				// defer again, in case there are any other defers
				// to call in the frame (not including the defer
				// call that caused the panic).
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		done := true
		if d.openDefer {
			done = runOpenDeferFrame(gp, d)
			if done && !d._panic.recovered {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			p.argp = unsafe.Pointer(getargp(0))
			reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		}
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		if done {
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
		}
		if p.recovered {
			gp._panic = p.link
			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
				// A normal recover would bypass/abort the Goexit. Instead,
				// we return to the processing loop of the Goexit.
				gp.sigcode0 = uintptr(gp._panic.sp)
				gp.sigcode1 = uintptr(gp._panic.pc)
				mcall(recovery)
				throw("bypassed recovery failed") // mcall should not return
			}
			atomic.Xadd(&runningPanicDefers, -1)

			// Remove any remaining non-started, open-coded
			// defer entries after a recover, since the
			// corresponding defers will be executed normally
			// (inline). Any such entry will become stale once
			// we run the corresponding defers inline and exit
			// the associated stack frame.
			d := gp._defer
			var prev *_defer
			if !done {
				// Skip our current frame, if not done. It is
				// needed to complete any remaining defers in
				// deferreturn()
				prev = d
				d = d.link
			}
			for d != nil {
				if d.started {
					// This defer is started but we
					// are in the middle of a
					// defer-panic-recover inside of
					// it, so don't remove it or any
					// further defer entries
					break
				}
				if d.openDefer {
					if prev == nil {
						gp._defer = d.link
					} else {
						prev.link = d.link
					}
					newd := d.link
					freedefer(d)
					d = newd
				} else {
					prev = d
					d = d.link
				}
			}

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}
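// For illustration, this loop plus gorecover below is what makes the
// usual recover idiom work:
//
//	func f() (err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				err = fmt.Errorf("recovered: %v", r)
//			}
//		}()
//		panic("boom")
//	}
//
// gorecover marks the panic recovered; gopanic then uses
// mcall(recovery) to make f's deferproc return 1, so f jumps to its
// return epilogue and returns normally with err set.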
// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}

	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}
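// Illustrative example: the checks in canpanic are what turn a fault in
// ordinary user Go code into a catchable panic rather than a process
// crash:
//
//	var p *int
//	_ = *p
//	// panic: runtime error: invalid memory address or nil pointer dereference
//
// The same fault while holding runtime locks, during malloc, or on the
// system stack fails canpanic and becomes a fatal throw instead.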
// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	return pc == funcPC(abort) || ((GOARCH == "arm" || GOARCH == "arm64") && pc == funcPC(abort)+sys.PCQuantum)
}