// Source file src/runtime/debuglog.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file provides an internal debug logging facility. The debug
// log is a lightweight, in-memory, per-M ring buffer. By default, the
// runtime prints the debug log on panic.
//
// To print something to the debug log, call dlog to obtain a dlogger
// and use the methods on that to add values. The values will be
// space-separated in the output (much like println).
//
// This facility can be enabled by passing -tags debuglog when
// building. Without this tag, dlog calls compile to nothing.
//
// Implementation notes
//
// There are two implementations of the dlog interface: dloggerImpl and
// dloggerFake. dloggerFake is a no-op implementation. dlogger is type-aliased
// to one or the other depending on the debuglog build tag. However, both types
// always exist and are always built. This helps ensure we compile as much of
// the implementation as possible in the default build configuration, while also
// enabling us to achieve good test coverage of the real debuglog implementation
// even when the debuglog build tag is not set.
    25  
    26  package runtime
    27  
    28  import (
    29  	"internal/abi"
    30  	"internal/runtime/atomic"
    31  	"internal/runtime/sys"
    32  	"unsafe"
    33  )
    34  
// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
// Capping at one eighth of the buffer keeps a single string from
// dominating the ring.
const debugLogStringLimit = debugLogBytes / 8
    43  
// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
//
//go:nosplit
//go:nowritebarrierrec
func dlog() dlogger {
	// dlog1 is defined to either dlogImpl or dlogFake, matching
	// whichever implementation the debuglog build tag selected.
	return dlog1()
}
    65  
// dlogFake is the no-op dlog implementation used when the debuglog
// build tag is not set. It allocates nothing and records nothing.
//
//go:nosplit
//go:nowritebarrierrec
func dlogFake() dloggerFake {
	return dloggerFake{}
}
    71  
// dlogImpl is the real dlog implementation, used when the debuglog
// build tag is set. It returns a logger owned by the caller with the
// record header (time deltas and P) already written.
//
//go:nosplit
//go:nowritebarrierrec
func dlogImpl() *dloggerImpl {
	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		// Load the list head through a uintptr so no pointer
		// write barriers are involved; the list is prepend-only.
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			// Claim an unowned logger with a 0->1 CAS on owned.
			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Use sysAllocOS instead of sysAlloc because we want to interfere
		// with the runtime as little as possible, and sysAlloc updates accounting.
		l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{})))
		if l == nil {
			throw("failed to allocate debug log")
		}
		l.w.r.data = &l.w.data
		l.owned.Store(1)

		// Prepend to allDloggers list.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dloggerImpl)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header.
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		// No usable P; record -1 as the P id.
		l.w.varint(-1)
	}

	return l
}
   140  
// A dloggerImpl writes to the debug log.
//
// To obtain a dloggerImpl, call dlog(). When done with the dloggerImpl, call
// end().
type dloggerImpl struct {
	_ sys.NotInHeap
	// w is the ring buffer this logger writes records into.
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dloggerImpl

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically.
	owned atomic.Uint32
}
   156  
// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races. It is loaded and
// CASed through a uintptr (see dlogImpl) to avoid write barriers.
var allDloggers *dloggerImpl
   161  
// A dloggerFake is a no-op implementation of dlogger.
type dloggerFake struct{}

// end is a no-op; dloggerFake records nothing.
//
//go:nosplit
func (l dloggerFake) end() {}
   167  
// end finishes the current record: it fills in the framing header
// reserved by dlogImpl, commits the record so the reader can see it,
// and releases ownership of the logger.
//
//go:nosplit
func (l *dloggerImpl) end() {
	// Fill in framing header.
	size := l.w.write - l.w.r.end
	if !l.w.writeFrameAt(l.w.r.end, size) {
		throw("record too large")
	}

	// Commit the record.
	l.w.r.end = l.w.write

	// Attempt to return this logger to the cache.
	if putCachedDlogger(l) {
		return
	}

	// Return the logger to the global pool.
	// Clearing owned makes l claimable by any M in dlogImpl.
	l.owned.Store(0)
}
   187  
// Field type tags. Each field in a record's payload begins with one of
// these bytes. Values start at 1 so a zero byte is never a valid tag.
const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt
	debugLogUint
	debugLogHex
	debugLogPtr
	debugLogString
	debugLogConstString
	debugLogStringOverflow

	debugLogPC
	debugLogTraceback
)
   203  
// b is a no-op; it ignores x and returns l for chaining.
//
//go:nosplit
func (l dloggerFake) b(x bool) dloggerFake { return l }
   206  
   207  //go:nosplit
   208  func (l *dloggerImpl) b(x bool) *dloggerImpl {
   209  	if x {
   210  		l.w.byte(debugLogBoolTrue)
   211  	} else {
   212  		l.w.byte(debugLogBoolFalse)
   213  	}
   214  	return l
   215  }
   216  
//go:nosplit
func (l dloggerFake) i(x int) dloggerFake { return l }

// i logs an int. All signed integer helpers funnel into i64.
//
//go:nosplit
func (l *dloggerImpl) i(x int) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i8(x int8) dloggerFake { return l }

// i8 logs an int8.
//
//go:nosplit
func (l *dloggerImpl) i8(x int8) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i16(x int16) dloggerFake { return l }

// i16 logs an int16.
//
//go:nosplit
func (l *dloggerImpl) i16(x int16) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i32(x int32) dloggerFake { return l }

// i32 logs an int32.
//
//go:nosplit
func (l *dloggerImpl) i32(x int32) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i64(x int64) dloggerFake { return l }

// i64 logs an int64 as a tag byte followed by a zig-zag varint. All
// narrower signed types use this single encoding.
//
//go:nosplit
func (l *dloggerImpl) i64(x int64) *dloggerImpl {
	l.w.byte(debugLogInt)
	l.w.varint(x)
	return l
}
   258  
//go:nosplit
func (l dloggerFake) u(x uint) dloggerFake { return l }

// u logs a uint. All unsigned integer helpers funnel into u64.
//
//go:nosplit
func (l *dloggerImpl) u(x uint) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) uptr(x uintptr) dloggerFake { return l }

// uptr logs a uintptr as a plain unsigned value (use p or pc for
// values that should be treated as pointers or PCs).
//
//go:nosplit
func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u8(x uint8) dloggerFake { return l }

// u8 logs a uint8.
//
//go:nosplit
func (l *dloggerImpl) u8(x uint8) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u16(x uint16) dloggerFake { return l }

// u16 logs a uint16.
//
//go:nosplit
func (l *dloggerImpl) u16(x uint16) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u32(x uint32) dloggerFake { return l }

// u32 logs a uint32.
//
//go:nosplit
func (l *dloggerImpl) u32(x uint32) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u64(x uint64) dloggerFake { return l }

// u64 logs a uint64 as a tag byte followed by a uvarint.
//
//go:nosplit
func (l *dloggerImpl) u64(x uint64) *dloggerImpl {
	l.w.byte(debugLogUint)
	l.w.uvarint(x)
	return l
}
   308  
//go:nosplit
func (l dloggerFake) hex(x uint64) dloggerFake { return l }

// hex logs a uint64 that should be printed in hexadecimal.
//
//go:nosplit
func (l *dloggerImpl) hex(x uint64) *dloggerImpl {
	l.w.byte(debugLogHex)
	l.w.uvarint(x)
	return l
}
   318  
//go:nosplit
func (l dloggerFake) p(x any) dloggerFake { return l }

// p logs a pointer-shaped value: nil, or a chan, func, map, pointer,
// or unsafe.Pointer. Any other dynamic type throws.
//
//go:nosplit
func (l *dloggerImpl) p(x any) *dloggerImpl {
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		v := efaceOf(&x)
		switch v._type.Kind_ & abi.KindMask {
		case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
			// For these kinds the eface data word is the pointer
			// value itself.
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}
   338  
//go:nosplit
func (l dloggerFake) s(x string) dloggerFake { return l }

// s logs a string. Strings whose data lies in the binary image are
// recorded by reference as an offset from etext; all others are copied
// into the ring buffer, truncated at debugLogStringLimit bytes.
//
//go:nosplit
func (l *dloggerImpl) s(x string) *dloggerImpl {
	strData := unsafe.StringData(x)
	datap := &firstmoduledata
	// The len(x) > 4 guard means very short strings are simply
	// copied; the by-reference encoding only pays off beyond that.
	if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(len(x)))
		l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// We can't use unsafe.Slice as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = unsafe.Pointer(strData)
		bb.len, bb.cap = len(x), len(x)
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		if len(b) != len(x) {
			// Record how many bytes were dropped so the printer
			// can report the truncation.
			l.w.byte(debugLogStringOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}
   373  
//go:nosplit
func (l dloggerFake) pc(x uintptr) dloggerFake { return l }

// pc logs a program counter, which will be symbolized when the log is
// printed.
//
//go:nosplit
func (l *dloggerImpl) pc(x uintptr) *dloggerImpl {
	l.w.byte(debugLogPC)
	l.w.uvarint(uint64(x))
	return l
}
   383  
//go:nosplit
func (l dloggerFake) traceback(x []uintptr) dloggerFake { return l }

// traceback logs a slice of PCs as a single field: a count followed by
// that many PC uvarints. The PCs are printed as a symbolized stack.
//
//go:nosplit
func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl {
	l.w.byte(debugLogTraceback)
	l.w.uvarint(uint64(len(x)))
	for _, pc := range x {
		l.w.uvarint(uint64(pc))
	}
	return l
}
   396  
// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
type debugLogWriter struct {
	_ sys.NotInHeap
	// write is the virtual write position. It increases
	// monotonically; write % len(data.b) is the physical index.
	write uint64
	data  debugLogBuf

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}
   428  
// debugLogBuf is the off-heap storage backing a debugLogWriter's ring
// buffer.
type debugLogBuf struct {
	_ sys.NotInHeap
	b [debugLogBytes]byte
}
   433  
const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record. The header holds the record
	// size as a little-endian uint16.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record:
	// a framing header plus two little-endian uint64 time values.
	debugLogSyncSize = debugLogHeaderSize + 2*8
)
   442  
// ensure makes room for an n-byte write by advancing the internal
// reader past (discarding) the oldest records until write+n fits
// within one buffer length of r.begin.
//
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}
   458  
   459  //go:nosplit
   460  func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
   461  	l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
   462  	l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
   463  	return size <= 0xFFFF
   464  }
   465  
// writeSync emits a sync record: a zero-size framing header followed
// by absolute tick and nano values. This establishes a new time base
// for the deltas in subsequent record headers.
//
//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	// Commit immediately; sync records have no separate end() step.
	l.r.end = l.write
}
   476  
   477  //go:nosplit
   478  func (l *debugLogWriter) writeUint64LE(x uint64) {
   479  	var b [8]byte
   480  	b[0] = byte(x)
   481  	b[1] = byte(x >> 8)
   482  	b[2] = byte(x >> 16)
   483  	b[3] = byte(x >> 24)
   484  	b[4] = byte(x >> 32)
   485  	b[5] = byte(x >> 40)
   486  	b[6] = byte(x >> 48)
   487  	b[7] = byte(x >> 56)
   488  	l.bytes(b[:])
   489  }
   490  
// byte appends a single byte to the ring buffer, discarding old
// records first if necessary to make room.
//
//go:nosplit
func (l *debugLogWriter) byte(x byte) {
	l.ensure(1)
	pos := l.write
	l.write++
	l.data.b[pos%uint64(len(l.data.b))] = x
}
   498  
// bytes appends x to the ring buffer, splitting the copy wherever the
// buffer physically wraps.
//
//go:nosplit
func (l *debugLogWriter) bytes(x []byte) {
	l.ensure(uint64(len(x)))
	pos := l.write
	l.write += uint64(len(x))
	for len(x) > 0 {
		// copy stops at the physical end of the buffer; loop to
		// write the remainder at the front.
		n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
		pos += uint64(n)
		x = x[n:]
	}
}
   510  
   511  //go:nosplit
   512  func (l *debugLogWriter) varint(x int64) {
   513  	var u uint64
   514  	if x < 0 {
   515  		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
   516  	} else {
   517  		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
   518  	}
   519  	l.uvarint(u)
   520  }
   521  
// uvarint appends u in base-128 varint encoding: 7 bits per byte,
// least-significant group first, high bit set on every byte but the
// last. It stages into l.buf to keep stack usage low in nosplit
// contexts.
//
//go:nosplit
func (l *debugLogWriter) uvarint(u uint64) {
	i := 0
	for u >= 0x80 {
		l.buf[i] = byte(u) | 0x80
		u >>= 7
		i++
	}
	l.buf[i] = byte(u)
	i++
	l.bytes(l.buf[:i])
}
   534  
// A debugLogReader decodes records from a debugLogWriter's ring
// buffer.
type debugLogReader struct {
	// data is the ring buffer being read.
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin.
	tick, nano uint64
}
   545  
// skip consumes the record at r.begin and returns its size in bytes,
// or ^uint64(0) if the record is incomplete (extends past r.end). Sync
// records update the reader's time base as a side effect.
//
//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}
   565  
   566  //go:nosplit
   567  func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
   568  	return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
   569  		uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
   570  }
   571  
   572  //go:nosplit
   573  func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
   574  	var b [8]byte
   575  	for i := range b {
   576  		b[i] = r.data.b[pos%uint64(len(r.data.b))]
   577  		pos++
   578  	}
   579  	return uint64(b[0]) | uint64(b[1])<<8 |
   580  		uint64(b[2])<<16 | uint64(b[3])<<24 |
   581  		uint64(b[4])<<32 | uint64(b[5])<<40 |
   582  		uint64(b[6])<<48 | uint64(b[7])<<56
   583  }
   584  
// peek returns the absolute tick of the record at r.begin without
// consuming it. Sync records encountered first are consumed, updating
// the time base. It returns ^uint64(0) if no complete record is
// available.
func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	// Decode the leading tick-delta uvarint using a local position
	// so r.begin is not advanced.
	pos := r.begin + debugLogHeaderSize
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[pos%uint64(len(r.data.b))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	if pos > r.begin+size {
		return ^uint64(0)
	}
	return r.tick + u
}
   624  
// header consumes the framing header and the tick/nano/p prefix of the
// record at r.begin, returning the record's end position and the
// decoded absolute tick, nano, and P id.
func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p.
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())

	return
}
   639  
   640  func (r *debugLogReader) uvarint() uint64 {
   641  	var u uint64
   642  	for i := uint(0); ; i += 7 {
   643  		b := r.data.b[r.begin%uint64(len(r.data.b))]
   644  		r.begin++
   645  		u |= uint64(b&^0x80) << i
   646  		if b&0x80 == 0 {
   647  			break
   648  		}
   649  	}
   650  	return u
   651  }
   652  
   653  func (r *debugLogReader) varint() int64 {
   654  	u := r.uvarint()
   655  	var v int64
   656  	if u&1 == 0 {
   657  		v = int64(u >> 1)
   658  	} else {
   659  		v = ^int64(u >> 1)
   660  	}
   661  	return v
   662  }
   663  
// printVal decodes and prints one field at r.begin, advancing past it.
// It reports false if the data appears corrupt, in which case the
// caller should abort printing this log.
func (r *debugLogReader) printVal() bool {
	typ := r.data.b[r.begin%uint64(len(r.data.b))]
	r.begin++

	switch typ {
	default:
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		// Write the string bytes directly, in chunks where the
		// ring buffer physically wraps.
		for sl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		// The string was stored as a length and an offset from
		// etext (see dloggerImpl.s).
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		// We can't use unsafe.String as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogStringOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}
   740  
// printDebugLog prints the debug log. This is a no-op unless the
// debuglog build tag is set.
func printDebugLog() {
	if dlogEnabled {
		printDebugLogImpl()
	}
}
   747  
// printDebugLogImpl merges and prints the records of every per-M debug
// log in timestamp order.
func printDebugLogImpl() {
	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool
		lost     uint64
		nextTick uint64
	}
	// Use sysAllocOS instead of sysAlloc because we want to interfere
	// with the runtime as little as possible, and sysAlloc updates accounting.
	state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	// View the raw allocation as a slice of n readStates.
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			// r.begin bytes have already been overwritten by the
			// writer and are lost.
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records. This is a k-way merge: each pass prints the
	// record with the smallest next tick across all logs.
	for {
		// Find the next record.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			// All logs are exhausted.
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		// Temporarily clamp s.end to this record's end so printVal
		// cannot read past it.
		end, _, nano, p := s.header()
		oldEnd := s.end
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
		print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}
   861  
// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
	fn := findfunc(pc)
	// Step back into the call instruction that produced this return
	// PC, but only if doing so stays past the function entry.
	if returnPC && (!fn.valid() || pc > fn.entry()) {
		// TODO(austin): Don't back up if the previous frame
		// was a sigpanic.
		pc--
	}

	print(hex(pc))
	if !fn.valid() {
		print(" [unknown PC]")
	} else {
		name := funcname(fn)
		file, line := funcline(fn, pc)
		print(" [", name, "+", hex(pc-fn.entry()),
			" ", file, ":", line, "]")
	}
}
   882  
