// Go execution tracer.
// The tracer captures a wide range of execution events such as goroutine
// creation/blocking/unblocking, syscall enter/exit/block, and GC events,
// and writes them to a buffer in a compact form; see ReadTrace.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
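// Event types in the trace, encoded in the low six bits of an event's
// first byte (the top two bits hold the inline argument count; see
// traceArgCountShift below).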
const (
traceEvNone = 0
traceEvBatch = 1
traceEvFrequency = 2
traceEvStack = 3
traceEvGomaxprocs = 4
traceEvProcStart = 5
traceEvProcStop = 6
traceEvGCStart = 7
traceEvGCDone = 8
traceEvGCSTWStart = 9
traceEvGCSTWDone = 10
traceEvGCSweepStart = 11
traceEvGCSweepDone = 12
traceEvGoCreate = 13
traceEvGoStart = 14
traceEvGoEnd = 15
traceEvGoStop = 16
traceEvGoSched = 17
traceEvGoPreempt = 18
traceEvGoSleep = 19
traceEvGoBlock = 20
traceEvGoUnblock = 21
traceEvGoBlockSend = 22
traceEvGoBlockRecv = 23
traceEvGoBlockSelect = 24
traceEvGoBlockSync = 25
traceEvGoBlockCond = 26
traceEvGoBlockNet = 27
traceEvGoSysCall = 28
traceEvGoSysExit = 29
traceEvGoSysBlock = 30
traceEvGoWaiting = 31
traceEvGoInSyscall = 32
traceEvHeapAlloc = 33
traceEvNextGC = 34
traceEvTimerGoroutine = 35
traceEvFutileWakeup = 36
traceEvString = 37
traceEvGoStartLocal = 38
traceEvGoUnblockLocal = 39
traceEvGoSysExitLocal = 40
traceEvGoStartLabel = 41
traceEvGoBlockGC = 42
traceEvGCMarkAssistStart = 43
traceEvGCMarkAssistDone = 44
traceEvUserTaskCreate = 45
traceEvUserTaskEnd = 46
traceEvUserRegion = 47
traceEvUserLog = 48
traceEvCount = 49
)
const (
	// Timestamps in trace are cputicks/traceTickDiv; dividing makes
	// timestamp diffs smaller, so they encode in fewer varint bytes.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
	// Maximum number of PCs in a single stack trace.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote a futile previous wakeup.
	traceFutileWakeup byte = 128
)
// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}
// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace
	// concurrently. Such syscalls will use the global trace.buf buffer,
	// because we've acquired all p's by doing stop-the-world. So this
	// protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall
	// could already emit a delayed event (see exitTicks in exitsyscall).
	// To instruct traceEvent that it must not ignore events below, we set
	// startingtrace. trace.enabled is set afterwards once we have emitted
	// all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	// World is stopped, no need to lock allgs.
	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before the traceEvGoInSyscall timestamp.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)
	unlock(&sched.sysmonlock)
	startTheWorldGC()
	return nil
}
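// Usage sketch (illustrative, not part of the runtime): most clients drive
// StartTrace/ReadTrace/StopTrace indirectly through the public runtime/trace
// package, roughly:
//
//	f, err := os.Create("out.trace")
//	if err != nil { ... }
//	if err := trace.Start(f); err != nil { ... }
//	defer trace.Stop()
//
// trace.Start spawns a goroutine that loops on runtime.ReadTrace, copying
// each returned chunk to the writer until ReadTrace reports end of trace.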
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's
	// below, and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)
	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
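// Call-sequence sketch (illustrative): successive ReadTrace calls yield the
// header, then the queue of full buffers, then the frequency footer, then
// the dumped stack-table buffers, and finally nil once StopTrace has run,
// so a reader simply loops until it sees a zero-length chunk.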
// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}
// traceProcFree frees the trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}
// traceFullQueue queues buf into the queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from the queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
// traceEvent writes a single event to the trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, the current stack id is written as the last argument
// (skipping skip top frames). If skip == 0, a zero stack id is written.
// If skip < 0, no stack id is recorded.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// event type, length, sequence, timestamp, stack id and two add params
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
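// Encoding sketch (illustrative, derived from traceEventLocked above): an
// event with three or more encoded arguments, e.g. traceEvGoUnblock with a
// stack, is laid out as
//
//	byte:    ev | 3<<traceArgCountShift
//	byte:    payload length in bytes (reserved, patched via lenp)
//	varint:  timestamp delta in units of cputicks/traceTickDiv
//	varint*: args (here goid and seq)
//	varint:  stack id
//
// Events with fewer than three arguments omit the length byte, since the
// decoder can derive the size from the argument count in the top two bits.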
func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}
// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}
// traceFlush puts buf onto the stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
// traceString adds a string to trace.strings and returns its id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)
		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// Memory allocation above may trigger tracing and cause *bufp changes.
	// The following code works with *bufp, so there must be no memory
	// allocation or any activity that causes tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// Double-check the string and its length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}
// traceAppend appends v to buf in little-endian base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
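// For example, traceAppend(nil, 300) yields the two bytes 0xac 0x02:
// 300 = 0b1_0010_1100 splits into the 7-bit groups 0101100 and 0000010,
// emitted low group first with the 0x80 continuation bit set on all
// but the last byte.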
// varint appends v to buf in little-endian base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns the slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}
// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}
// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}
// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stkp := range tab.tab {
		stk := stkp.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}
type traceFrame struct {
funcID uint64
fileID uint64
line uint64
}
// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}
// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc. It is allocated from
// non-GC'd memory, so it must not contain heap pointers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}
func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span sweep.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page. It may be called
// outside a traceGCSweepStart/traceGCSweepDone pair, in which case it
// emits no events.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}
func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}
func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}
func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P and so is not stopped
		// with the rest of the world) and the code that initializes a new
		// trace. Recorded sysexitticks are therefore best effort: if they
		// predate this trace, assign a fresh timestamp to keep the log
		// consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps
	// as blocked; to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}
func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if nextGC := atomic.Load64(&memstats.next_gc); nextGC == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, nextGC)
	}
}
// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}
//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}
//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// Double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
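// User-annotation sketch (illustrative, not part of the runtime): the
// trace_user* hooks above back the public runtime/trace annotation API,
// which is wired to them via go:linkname, e.g.
//
//	ctx, task := trace.NewTask(ctx, "makeCappuccino")
//	defer task.End()
//	trace.Log(ctx, "orderID", "1234")
//	region := trace.StartRegion(ctx, "steamMilk")
//	steamMilk()
//	region.End()
//
// NewTask ends up in trace_userTaskCreate, Log in trace_userLog, and
// StartRegion/End in trace_userRegion.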