// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
	// callers is a slice of PCs that have not yet been expanded to frames.
	callers []uintptr

	// frames is a slice of Frames that have yet to be returned.
	frames     []Frame
	// frameStore is inline backing storage for frames, so that the
	// common case (one or two frames pending) needs no heap allocation.
	frameStore [2]Frame
}

// Frame is the information returned by Frames for each call frame.
type Frame struct {
	// PC is the program counter for the location in this frame.
	// For a frame that calls another frame, this will be the
	// program counter of a call instruction. Because of inlining,
	// multiple frames may have the same PC value, but different
	// symbolic information.
	PC uintptr

	// Func is the Func value of this call frame. This may be nil
	// for non-Go code or fully inlined functions.
	Func *Func

	// Function is the package path-qualified function name of
	// this call frame. If non-empty, this string uniquely
	// identifies a single function in the program.
	// This may be the empty string if not known.
	// If Func is not nil then Function == Func.Name().
	Function string

	// File and Line are the file name and line number of the
	// location in this frame. For non-leaf frames, this will be
	// the location of a call. These may be the empty string and
	// zero, respectively, if not known.
	File string
	Line int

	// Entry point program counter for the function; may be zero
	// if not known. If Func is not nil then Entry ==
	// Func.Entry().
	Entry uintptr

	// The runtime's internal view of the function. This field
	// is set (funcInfo.valid() returns true) only for Go functions,
	// not for C functions.
	funcInfo funcInfo
}

// CallersFrames takes a slice of PC values returned by Callers and
// prepares to return function/file/line information.
// Do not change the slice until you are done with the Frames.
func ( []uintptr) *Frames {
	 := &Frames{callers: }
	.frames = .frameStore[:0]
	return 
}

// Next returns frame information for the next caller.
// If more is false, there are no more callers (the Frame value is valid).
func ( *Frames) () ( Frame,  bool) {
	for len(.frames) < 2 {
		// Find the next frame.
		// We need to look for 2 frames so we know what
		// to return for the "more" result.
		if len(.callers) == 0 {
			break
		}
		 := .callers[0]
		.callers = .callers[1:]
		 := findfunc()
		if !.valid() {
			if cgoSymbolizer != nil {
				// Pre-expand cgo frames. We could do this
				// incrementally, too, but there's no way to
				// avoid allocation in this case anyway.
				.frames = append(.frames, expandCgoFrames()...)
			}
			continue
		}
		 := ._Func()
		 := .Entry()
		if  >  {
			// We store the pc of the start of the instruction following
			// the instruction in question (the call or the inline mark).
			// This is done for historical reasons, and to make FuncForPC
			// work correctly for entries in the result of runtime.Callers.
			--
		}
		 := funcname()
		if  := funcdata(, _FUNCDATA_InlTree);  != nil {
			 := (*[1 << 20]inlinedCall)()
			 := pcdatavalue(, _PCDATA_InlTreeIndex, , nil)
			if  >= 0 {
				// Note: entry is not modified. It always refers to a real frame, not an inlined one.
				 = nil
				 = funcnameFromNameoff(, [].func_)
				// File/line is already correct.
				// TODO: remove file/line from InlinedCall?
			}
		}
		.frames = append(.frames, Frame{
			PC:       ,
			Func:     ,
			Function: ,
			Entry:    ,
			funcInfo: ,
			// Note: File,Line set below
		})
	}

	// Pop one frame from the frame list. Keep the rest.
	// Avoid allocation in the common case, which is 1 or 2 frames.
	switch len(.frames) {
	case 0: // In the rare case when there are no frames at all, we return Frame{}.
		return
	case 1:
		 = .frames[0]
		.frames = .frameStore[:0]
	case 2:
		 = .frames[0]
		.frameStore[0] = .frames[1]
		.frames = .frameStore[:1]
	default:
		 = .frames[0]
		.frames = .frames[1:]
	}
	 = len(.frames) > 0
	if .funcInfo.valid() {
		// Compute file/line just before we need to return it,
		// as it can be expensive. This avoids computing file/line
		// for the Frame we find but don't return. See issue 32093.
		,  := funcline1(.funcInfo, .PC, false)
		.File, .Line = , int()
	}
	return
}

// runtime_expandFinalInlineFrame expands the final pc in stk to include all
// "callers" if pc is inline.
//
//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame
func ( []uintptr) []uintptr {
	if len() == 0 {
		return 
	}
	 := [len()-1]
	 :=  - 1

	 := findfunc()
	if !.valid() {
		// Not a Go function.
		return 
	}

	 := funcdata(, _FUNCDATA_InlTree)
	if  == nil {
		// Nothing inline in f.
		return 
	}

	// Treat the previous func as normal. We haven't actually checked, but
	// since this pc was included in the stack, we know it shouldn't be
	// elided.
	 := funcID_normal

	// Remove pc from stk; we'll re-add it below.
	 = [:len()-1]

	// See inline expansion in gentraceback.
	var  pcvalueCache
	 := (*[1 << 20]inlinedCall)()
	for {
		 := pcdatavalue(, _PCDATA_InlTreeIndex, , &)
		if  < 0 {
			break
		}
		if [].funcID == funcID_wrapper && elideWrapperCalling() {
			// ignore wrappers
		} else {
			 = append(, )
		}
		 = [].funcID
		// Back up to an instruction in the "caller".
		 = .entry + uintptr([].parentPc)
		 =  + 1
	}

	// N.B. we want to keep the last parentPC which is not inline.
	 = append(, )

	return 
}

// expandCgoFrames expands frame information for pc, known to be
// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
// returns nil if pc could not be expanded.
func ( uintptr) []Frame {
	 := cgoSymbolizerArg{pc: }
	callCgoSymbolizer(&)

	if .file == nil && .funcName == nil {
		// No useful information from symbolizer.
		return nil
	}

	var  []Frame
	for {
		 = append(, Frame{
			PC:       ,
			Func:     nil,
			Function: gostring(.funcName),
			File:     gostring(.file),
			Line:     int(.lineno),
			Entry:    .entry,
			// funcInfo is zero, which implies !funcInfo.valid().
			// That ensures that we use the File/Line info given here.
		})
		if .more == 0 {
			break
		}
		callCgoSymbolizer(&)
	}

	// No more frames for this PC. Tell the symbolizer we are done.
	// We don't try to maintain a single cgoSymbolizerArg for the
	// whole use of Frames, because there would be no good way to tell
	// the symbolizer when we are done.
	.pc = 0
	callCgoSymbolizer(&)

	return 
}

// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
// All code operating on a *Func must call raw() to get the *_func
// or funcInfo() to get the funcInfo instead.

// A Func represents a Go function in the running binary.
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}

func ( *Func) () *_func {
	return (*_func)(unsafe.Pointer())
}

func ( *Func) () funcInfo {
	 := .raw()
	return funcInfo{, findmoduledatap(.entry)}
}

// PCDATA and FUNCDATA table indexes.
//
// See funcdata.h and ../cmd/internal/objabi/funcdata.go.
const (
	_PCDATA_UnsafePoint   = 0
	_PCDATA_StackMapIndex = 1
	_PCDATA_InlTreeIndex  = 2

	_FUNCDATA_ArgsPointerMaps    = 0
	_FUNCDATA_LocalsPointerMaps  = 1
	_FUNCDATA_StackObjects       = 2
	_FUNCDATA_InlTree            = 3
	_FUNCDATA_OpenCodedDeferInfo = 4

	_ArgsSizeUnknown = -0x80000000
)

const (
	// PCDATA_UnsafePoint values.
	_PCDATA_UnsafePointSafe   = -1 // Safe for async preemption
	_PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption

	// _PCDATA_Restart1(2) apply on a sequence of instructions, within
	// which if an async preemption happens, we should back off the PC
	// to the start of the sequence when resume.
	// We need two so we can distinguish the start/end of the sequence
	// in case that two sequences are next to each other.
	_PCDATA_Restart1 = -3
	_PCDATA_Restart2 = -4

	// Like _PCDATA_Restart1, but back to function entry if async
	// preempted.
	_PCDATA_RestartAtEntry = -5
)

// A funcID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in cmd/internal/objabi/funcid.go.
type funcID uint8

const (
	funcID_normal funcID = iota // not a special function
	funcID_runtime_main
	funcID_goexit
	funcID_jmpdefer
	funcID_mcall
	funcID_morestack
	funcID_mstart
	funcID_rt0_go
	funcID_asmcgocall
	funcID_sigpanic
	funcID_runfinq
	funcID_gcBgMarkWorker
	funcID_systemstack_switch
	funcID_systemstack
	funcID_cgocallback
	funcID_gogo
	funcID_externalthreadhandler
	funcID_debugCallV1
	funcID_gopanic
	funcID_panicwrap
	funcID_handleAsyncEvent
	funcID_asyncPreempt
	funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
)

// pcHeader holds data used by the pclntab lookups.
type pcHeader struct {
	magic          uint32  // 0xFFFFFFFA
	pad1, pad2     uint8   // 0,0
	minLC          uint8   // min instruction size
	ptrSize        uint8   // size of a ptr in bytes
	nfunc          int     // number of functions in the module
	nfiles         uint    // number of entries in the file tab.
	funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
	cuOffset       uintptr // offset to the cutab variable from pcHeader
	filetabOffset  uintptr // offset to the filetab variable from pcHeader
	pctabOffset    uintptr // offset to the pctab variable from pcHeader
	pclnOffset     uintptr // offset to the pclntab variable from pcHeader
}

// moduledata records information about the layout of the executable
// image. It is written by the linker. Any changes here must be
// matched changes to the code in cmd/internal/ld/symtab.go:symtab.
// moduledata is stored in statically allocated non-pointer memory;
// none of the pointers here are visible to the garbage collector.
type moduledata struct {
	pcHeader     *pcHeader
	funcnametab  []byte
	cutab        []uint32
	filetab      []byte
	pctab        []byte
	pclntable    []byte
	ftab         []functab
	findfunctab  uintptr
	minpc, maxpc uintptr

	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	end, gcdata, gcbss    uintptr
	types, etypes         uintptr

	textsectmap []textsect
	typelinks   []int32 // offsets from types
	itablinks   []*itab

	ptab []ptabEntry

	pluginpath string
	pkghashes  []modulehash

	modulename   string
	modulehashes []modulehash

	hasmain uint8 // 1 if module contains the main function, 0 otherwise

	gcdatamask, gcbssmask bitvector

	typemap map[typeOff]*_type // offset to *_rtype in previous module

	bad bool // module failed to load and should be ignored

	// next links the moduledata linked list rooted at firstmoduledata.
	next *moduledata
}

// A modulehash is used to compare the ABI of a new module or a
// package in a new module with the loaded program.
//
// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
//
// For each loaded plugin, the pkghashes slice has a modulehash of the
// newly loaded package that can be used to check the plugin's version of
// a package against any previously loaded version of the package.
// This is done in plugin.lastmoduleinit.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type

var firstmoduledata moduledata  // linker symbol
var lastmoduledatap *moduledata // linker symbol
var modulesSlice *[]*moduledata // see activeModules

// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
//
// This is nosplit/nowritebarrier because it is called by the
// cgo pointer checking code.
//go:nosplit
//go:nowritebarrier
func () []*moduledata {
	 := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if  == nil {
		return nil
	}
	return *
}

// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending to the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func () {
	 := new([]*moduledata)
	for  := &firstmoduledata;  != nil;  = .next {
		if .bad {
			continue
		}
		* = append(*, )
		if .gcdatamask == (bitvector{}) {
			.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(.gcdata)), .edata-.data)
			.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(.gcbss)), .ebss-.bss)
		}
	}

	// Modules appear in the moduledata linked list in the order they are
	// loaded by the dynamic loader, with one exception: the
	// firstmoduledata itself the module that contains the runtime. This
	// is not always the first module (when using -buildmode=shared, it
	// is typically libstd.so, the second module). The order matters for
	// typelinksinit, so we swap the first module with whatever module
	// contains the main function.
	//
	// See Issue #18729.
	for ,  := range * {
		if .hasmain != 0 {
			(*)[0] = 
			(*)[] = &firstmoduledata
			break
		}
	}

	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer())
}

// functab is one entry of the moduledata.ftab table: a function's
// entry PC and the offset of its _func record within pclntable.
type functab struct {
	entry   uintptr
	funcoff uintptr
}

// Mapping information for secondary text sections

type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	length   uintptr // section length
	baseaddr uintptr // relocated section address
}

const minfunc = 16                 // minimum function size
const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table

// findfunctab is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32
	subbuckets [16]byte
}

func () {
	for  := &firstmoduledata;  != nil;  = .next {
		moduledataverify1()
	}
}

// debugPcln enables extra pclntab consistency checking when true.
const debugPcln = false

func ( *moduledata) {
	// Check that the pclntab's format is valid.
	 := .pcHeader
	if .magic != 0xfffffffa || .pad1 != 0 || .pad2 != 0 || .minLC != sys.PCQuantum || .ptrSize != sys.PtrSize {
		println("runtime: function symbol table header:", hex(.magic), hex(.pad1), hex(.pad2), hex(.minLC), hex(.ptrSize))
		throw("invalid function symbol table\n")
	}

	// ftab is lookup table for function by program counter.
	 := len(.ftab) - 1
	for  := 0;  < ; ++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		if .ftab[].entry > .ftab[+1].entry {
			 := funcInfo{(*_func)(unsafe.Pointer(&.pclntable[.ftab[].funcoff])), }
			 := funcInfo{(*_func)(unsafe.Pointer(&.pclntable[.ftab[+1].funcoff])), }
			 := "end"
			if +1 <  {
				 = funcname()
			}
			println("function symbol table not sorted by program counter:", hex(.ftab[].entry), funcname(), ">", hex(.ftab[+1].entry), )
			for  := 0;  <= ; ++ {
				print("\t", hex(.ftab[].entry), " ", funcname(funcInfo{(*_func)(unsafe.Pointer(&.pclntable[.ftab[].funcoff])), }), "\n")
			}
			if GOOS == "aix" && isarchive {
				println("-Wl,-bnoobjreorder is mandatory on aix/ppc64 with c-archive")
			}
			throw("invalid runtime symbol table")
		}
	}

	if .minpc != .ftab[0].entry ||
		.maxpc != .ftab[].entry {
		throw("minpc or maxpc invalid")
	}

	for ,  := range .modulehashes {
		if .linktimehash != *.runtimehash {
			println("abi mismatch detected between", .modulename, "and", .modulename)
			throw("abi mismatch")
		}
	}
}

// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
//
// If pc represents multiple functions because of inlining, it returns
// the *Func describing the innermost function, but with an entry of
// the outermost function.
func ( uintptr) *Func {
	 := findfunc()
	if !.valid() {
		return nil
	}
	if  := funcdata(, _FUNCDATA_InlTree);  != nil {
		// Note: strict=false so bad PCs (those between functions) don't crash the runtime.
		// We just report the preceding function in that situation. See issue 29735.
		// TODO: Perhaps we should report no function at all in that case.
		// The runtime currently doesn't have function end info, alas.
		if  := pcdatavalue1(, _PCDATA_InlTreeIndex, , nil, false);  >= 0 {
			 := (*[1 << 20]inlinedCall)()
			 := funcnameFromNameoff(, [].func_)
			,  := funcline(, )
			 := &funcinl{
				entry: .entry, // entry of the real (the outermost) function.
				name:  ,
				file:  ,
				line:  int(),
			}
			return (*Func)(unsafe.Pointer())
		}
	}
	return ._Func()
}

// Name returns the name of the function.
func ( *Func) () string {
	if  == nil {
		return ""
	}
	 := .raw()
	if .entry == 0 { // inlined version
		 := (*funcinl)(unsafe.Pointer())
		return .name
	}
	return funcname(.funcInfo())
}

// Entry returns the entry address of the function.
func ( *Func) () uintptr {
	 := .raw()
	if .entry == 0 { // inlined version
		 := (*funcinl)(unsafe.Pointer())
		return .entry
	}
	return .entry
}

// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
func ( *Func) ( uintptr) ( string,  int) {
	 := .raw()
	if .entry == 0 { // inlined version
		 := (*funcinl)(unsafe.Pointer())
		return .file, .line
	}
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	,  := funcline1(.funcInfo(), , false)
	return , int()
}

func ( uintptr) *moduledata {
	for  := &firstmoduledata;  != nil;  = .next {
		if .minpc <=  &&  < .maxpc {
			return 
		}
	}
	return nil
}

// funcInfo pairs a raw *_func with the moduledata it was found in,
// which is needed to resolve its offsets (names, files, pc tables).
type funcInfo struct {
	*_func
	datap *moduledata
}

func ( funcInfo) () bool {
	return ._func != nil
}

func ( funcInfo) () *Func {
	return (*Func)(unsafe.Pointer(._func))
}

func ( uintptr) funcInfo {
	 := findmoduledatap()
	if  == nil {
		return funcInfo{}
	}
	const  = uintptr(len(findfuncbucket{}.subbuckets))

	 :=  - .minpc
	 :=  / pcbucketsize
	 :=  % pcbucketsize / (pcbucketsize / )

	 := (*findfuncbucket)(add(unsafe.Pointer(.findfunctab), *unsafe.Sizeof(findfuncbucket{})))
	 := .idx + uint32(.subbuckets[])

	// If the idx is beyond the end of the ftab, set it to the end of the table and search backward.
	// This situation can occur if multiple text sections are generated to handle large text sections
	// and the linker has inserted jump tables between them.

	if  >= uint32(len(.ftab)) {
		 = uint32(len(.ftab) - 1)
	}
	if  < .ftab[].entry {
		// With multiple text sections, the idx might reference a function address that
		// is higher than the pc being searched, so search backward until the matching address is found.

		for .ftab[].entry >  &&  > 0 {
			--
		}
		if  == 0 {
			throw("findfunc: bad findfunctab entry idx")
		}
	} else {
		// linear search to find func with pc >= entry.
		for .ftab[+1].entry <=  {
			++
		}
	}
	 := .ftab[].funcoff
	if  == ^uintptr(0) {
		// With multiple text sections, there may be functions inserted by the external
		// linker that are not known by Go. This means there may be holes in the PC
		// range covered by the func table. The invalid funcoff value indicates a hole.
		// See also cmd/link/internal/ld/pcln.go:pclntab
		return funcInfo{}
	}
	return funcInfo{(*_func)(unsafe.Pointer(&.pclntable[])), }
}

// pcvalueCache is a small set-associative cache of pcvalue lookups,
// used to speed up repeated walks of deep stacks.
type pcvalueCache struct {
	entries [2][8]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      uint32
	// val is the value of this cached pcvalue entry.
	val int32
}

// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
// It must be very cheap to calculate.
// For now, align to sys.PtrSize and reduce mod the number of entries.
// In practice, this appears to be fairly randomly and evenly distributed.
func ( uintptr) uintptr {
	return ( / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}

// Returns the PCData value, and the PC where this value starts.
// TODO: the start PC is returned only when cache is nil.
func ( funcInfo,  uint32,  uintptr,  *pcvalueCache,  bool) (int32, uintptr) {
	if  == 0 {
		return -1, 0
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if  != nil {
		 := pcvalueCacheKey()
		for  := range .entries[] {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			 := &.entries[][]
			if .off ==  && .targetpc ==  {
				return .val, 0
			}
		}
	}

	if !.valid() {
		if  && panicking == 0 {
			print("runtime: no module data for ", hex(.entry), "\n")
			throw("no module data")
		}
		return -1, 0
	}
	 := .datap
	 := .pctab[:]
	 := .entry
	 := 
	 := int32(-1)
	for {
		var  bool
		,  = step(, &, &,  == .entry)
		if ! {
			break
		}
		if  <  {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			// Put the new element at the beginning,
			// since it is the most likely to be newly used.
			if  != nil {
				 := pcvalueCacheKey()
				 := &.entries[]
				 := fastrand() % uint32(len(.entries[]))
				[] = [0]
				[0] = pcvalueCacheEnt{
					targetpc: ,
					off:      ,
					val:      ,
				}
			}

			return , 
		}
		 = 
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking != 0 || ! {
		return -1, 0
	}

	print("runtime: invalid pc-encoded table f=", funcname(), " pc=", hex(), " targetpc=", hex(), " tab=", , "\n")

	 = .pctab[:]
	 = .entry
	 = -1
	for {
		var  bool
		,  = step(, &, &,  == .entry)
		if ! {
			break
		}
		print("\tvalue=", , " until pc=", hex(), "\n")
	}

	throw("invalid runtime symbol table")
	return -1, 0
}

func ( funcInfo) *byte {
	if !.valid() || .nameoff == 0 {
		return nil
	}
	return &.datap.funcnametab[.nameoff]
}

func ( funcInfo) string {
	return gostringnocopy(cfuncname())
}

func ( funcInfo) string {
	 := funcname()
	 := len() - 1
	for ;  > 0; -- {
		if [] == '/' {
			break
		}
	}
	for ;  < len(); ++ {
		if [] == '.' {
			break
		}
	}
	return [:]
}

func ( funcInfo,  int32) *byte {
	if !.valid() {
		return nil
	}
	return &.datap.funcnametab[]
}

func ( funcInfo,  int32) string {
	return gostringnocopy(cfuncnameFromNameoff(, ))
}

func ( funcInfo,  int32) string {
	 := .datap
	if !.valid() {
		return "?"
	}
	// Make sure the cu index and file offset are valid
	if  := .cutab[.cuOffset+uint32()];  != ^uint32(0) {
		return gostringnocopy(&.filetab[])
	}
	// pcln section is corrupt.
	return "?"
}

func ( funcInfo,  uintptr,  bool) ( string,  int32) {
	 := .datap
	if !.valid() {
		return "?", 0
	}
	,  := pcvalue(, .pcfile, , nil, )
	, _ = pcvalue(, .pcln, , nil, )
	if  == -1 ||  == -1 || int() >= len(.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	 = funcfile(, )
	return
}

func ( funcInfo,  uintptr) ( string,  int32) {
	return funcline1(, , true)
}

func ( funcInfo,  uintptr,  *pcvalueCache) int32 {
	,  := pcvalue(, .pcsp, , , true)
	if &(sys.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(), " ", hex(.entry), " ", hex(), " ", hex(.pcsp), " ", , "\n")
	}
	return 
}

// funcMaxSPDelta returns the maximum spdelta at any point in f.
func ( funcInfo) int32 {
	 := .datap
	 := .pctab[.pcsp:]
	 := .entry
	 := int32(-1)
	 := int32(0)
	for {
		var  bool
		,  = step(, &, &,  == .entry)
		if ! {
			return 
		}
		if  >  {
			 = 
		}
	}
}

func ( funcInfo,  uint32) uint32 {
	return *(*uint32)(add(unsafe.Pointer(&.nfuncdata), unsafe.Sizeof(.nfuncdata)+uintptr()*4))
}

func ( funcInfo,  uint32,  uintptr,  *pcvalueCache) int32 {
	if  >= .npcdata {
		return -1
	}
	,  := pcvalue(, pcdatastart(, ), , , true)
	return 
}

func ( funcInfo,  uint32,  uintptr,  *pcvalueCache,  bool) int32 {
	if  >= .npcdata {
		return -1
	}
	,  := pcvalue(, pcdatastart(, ), , , )
	return 
}

// Like pcdatavalue, but also return the start PC of this PCData value.
// It doesn't take a cache.
func ( funcInfo,  uint32,  uintptr) (int32, uintptr) {
	if  >= .npcdata {
		return -1, 0
	}
	return pcvalue(, pcdatastart(, ), , nil, true)
}

func ( funcInfo,  uint8) unsafe.Pointer {
	if  < 0 ||  >= .nfuncdata {
		return nil
	}
	 := add(unsafe.Pointer(&.nfuncdata), unsafe.Sizeof(.nfuncdata)+uintptr(.npcdata)*4)
	if sys.PtrSize == 8 && uintptr()&4 != 0 {
		if uintptr(unsafe.Pointer(._func))&4 != 0 {
			println("runtime: misaligned func", ._func)
		}
		 = add(, 4)
	}
	return *(*unsafe.Pointer)(add(, uintptr()*sys.PtrSize))
}

// step advances to the next pc, value pair in the encoded table.
func ( []byte,  *uintptr,  *int32,  bool) ( []byte,  bool) {
	// For both uvdelta and pcdelta, the common case (~70%)
	// is that they are a single byte. If so, avoid calling readvarint.
	 := uint32([0])
	if  == 0 && ! {
		return nil, false
	}
	 := uint32(1)
	if &0x80 != 0 {
		,  = readvarint()
	}
	* += int32(-( & 1) ^ ( >> 1))
	 = [:]

	 := uint32([0])
	 = 1
	if &0x80 != 0 {
		,  = readvarint()
	}
	 = [:]
	* += uintptr( * sys.PCQuantum)
	return , true
}

// readvarint reads a varint from p, returning the number of bytes
// consumed and the decoded value.
func readvarint(p []byte) (read uint32, val uint32) {
	var v, shift, n uint32
	for {
		b := p[n]
		n++
		v |= uint32(b&0x7F) << (shift & 31)
		if b&0x80 == 0 {
			break
		}
		shift += 7
	}
	return n, v
}

// stackmap is the in-memory form of a FUNCDATA stack map: n bitmaps of
// nbit bits each, stored back to back starting at bytedata.
type stackmap struct {
	n        int32   // number of bitmaps
	nbit     int32   // number of bits in each bitmap
	bytedata [1]byte // bitmaps, each starting on a byte boundary
}

//go:nowritebarrier
func ( *stackmap,  int32) bitvector {
	// Check this invariant only when stackDebug is on at all.
	// The invariant is already checked by many of stackmapdata's callers,
	// and disabling it by default allows stackmapdata to be inlined.
	if stackDebug > 0 && ( < 0 ||  >= .n) {
		throw("stackmapdata: index out of range")
	}
	return bitvector{.nbit, addb(&.bytedata[0], uintptr(*((.nbit+7)>>3)))}
}

// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
type inlinedCall struct {
	parent   int16  // index of parent in the inltree, or < 0
	funcID   funcID // type of the called function
	_        byte   // padding
	file     int32 // perCU file index for inlined call. See cmd/link:pcln.go
	line     int32 // line number of the call site
	func_    int32 // offset into pclntab for name of called function
	parentPc int32 // position of an instruction whose source position is the call site (offset from entry)
}