Source file src/runtime/metrics.go

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // Metrics implementation exported to runtime/metrics.
     8  
     9  import (
    10  	"internal/godebugs"
    11  	"internal/runtime/gc"
    12  	"unsafe"
    13  )
    14  
    15  var (
    16  	// metrics is a map of runtime/metrics keys to data used by the runtime
    17  	// to sample each metric's value. metricsInit indicates it has been
    18  	// initialized.
    19  	//
    20  	// These fields are protected by metricsSema which should be
    21  	// locked/unlocked with metricsLock() / metricsUnlock().
    22  	metricsSema uint32 = 1
    23  	metricsInit bool
    24  	metrics     map[string]metricData
    25  
    26  	sizeClassBuckets []float64
    27  	timeHistBuckets  []float64
    28  )
    29  
// metricData describes how the runtime samples a single runtime/metrics
// metric: which statistics groups it needs and how to produce its value.
type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}
    40  
// metricsLock acquires metricsSema, which protects metricsInit, metrics,
// and the bucket slices declared above. Pair with metricsUnlock.
func metricsLock() {
	// Acquire the metricsSema but with handoff. Operations are typically
	// expensive enough that queueing up goroutines and handing off between
	// them will be noticeably better-behaved.
	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
	if raceenabled {
		// Model the semaphore as a lock so the race detector sees the
		// happens-before edge between critical sections.
		raceacquire(unsafe.Pointer(&metricsSema))
	}
}
    50  
// metricsUnlock releases metricsSema acquired by metricsLock.
func metricsUnlock() {
	if raceenabled {
		// Release must be announced to the race detector before the
		// semaphore actually lets another goroutine in.
		racerelease(unsafe.Pointer(&metricsSema))
	}
	semrelease(&metricsSema)
}
    57  
    58  // initMetrics initializes the metrics map if it hasn't been yet.
    59  //
    60  // metricsSema must be held.
    61  func initMetrics() {
    62  	if metricsInit {
    63  		return
    64  	}
    65  
    66  	sizeClassBuckets = make([]float64, gc.NumSizeClasses, gc.NumSizeClasses+1)
    67  	// Skip size class 0 which is a stand-in for large objects, but large
    68  	// objects are tracked separately (and they actually get placed in
    69  	// the last bucket, not the first).
    70  	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
    71  	for i := 1; i < gc.NumSizeClasses; i++ {
    72  		// Size classes have an inclusive upper-bound
    73  		// and exclusive lower bound (e.g. 48-byte size class is
    74  		// (32, 48]) whereas we want and inclusive lower-bound
    75  		// and exclusive upper-bound (e.g. 48-byte size class is
    76  		// [33, 49)). We can achieve this by shifting all bucket
    77  		// boundaries up by 1.
    78  		//
    79  		// Also, a float64 can precisely represent integers with
    80  		// value up to 2^53 and size classes are relatively small
    81  		// (nowhere near 2^48 even) so this will give us exact
    82  		// boundaries.
    83  		sizeClassBuckets[i] = float64(gc.SizeClassToSize[i] + 1)
    84  	}
    85  	sizeClassBuckets = append(sizeClassBuckets, float64Inf())
    86  
    87  	timeHistBuckets = timeHistogramMetricsBuckets()
    88  	metrics = map[string]metricData{
    89  		"/cgo/go-to-c-calls:calls": {
    90  			compute: func(_ *statAggregate, out *metricValue) {
    91  				out.kind = metricKindUint64
    92  				out.scalar = uint64(NumCgoCall())
    93  			},
    94  		},
    95  		"/cpu/classes/gc/mark/assist:cpu-seconds": {
    96  			deps: makeStatDepSet(cpuStatsDep),
    97  			compute: func(in *statAggregate, out *metricValue) {
    98  				out.kind = metricKindFloat64
    99  				out.scalar = float64bits(nsToSec(in.cpuStats.GCAssistTime))
   100  			},
   101  		},
   102  		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
   103  			deps: makeStatDepSet(cpuStatsDep),
   104  			compute: func(in *statAggregate, out *metricValue) {
   105  				out.kind = metricKindFloat64
   106  				out.scalar = float64bits(nsToSec(in.cpuStats.GCDedicatedTime))
   107  			},
   108  		},
   109  		"/cpu/classes/gc/mark/idle:cpu-seconds": {
   110  			deps: makeStatDepSet(cpuStatsDep),
   111  			compute: func(in *statAggregate, out *metricValue) {
   112  				out.kind = metricKindFloat64
   113  				out.scalar = float64bits(nsToSec(in.cpuStats.GCIdleTime))
   114  			},
   115  		},
   116  		"/cpu/classes/gc/pause:cpu-seconds": {
   117  			deps: makeStatDepSet(cpuStatsDep),
   118  			compute: func(in *statAggregate, out *metricValue) {
   119  				out.kind = metricKindFloat64
   120  				out.scalar = float64bits(nsToSec(in.cpuStats.GCPauseTime))
   121  			},
   122  		},
   123  		"/cpu/classes/gc/total:cpu-seconds": {
   124  			deps: makeStatDepSet(cpuStatsDep),
   125  			compute: func(in *statAggregate, out *metricValue) {
   126  				out.kind = metricKindFloat64
   127  				out.scalar = float64bits(nsToSec(in.cpuStats.GCTotalTime))
   128  			},
   129  		},
   130  		"/cpu/classes/idle:cpu-seconds": {
   131  			deps: makeStatDepSet(cpuStatsDep),
   132  			compute: func(in *statAggregate, out *metricValue) {
   133  				out.kind = metricKindFloat64
   134  				out.scalar = float64bits(nsToSec(in.cpuStats.IdleTime))
   135  			},
   136  		},
   137  		"/cpu/classes/scavenge/assist:cpu-seconds": {
   138  			deps: makeStatDepSet(cpuStatsDep),
   139  			compute: func(in *statAggregate, out *metricValue) {
   140  				out.kind = metricKindFloat64
   141  				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeAssistTime))
   142  			},
   143  		},
   144  		"/cpu/classes/scavenge/background:cpu-seconds": {
   145  			deps: makeStatDepSet(cpuStatsDep),
   146  			compute: func(in *statAggregate, out *metricValue) {
   147  				out.kind = metricKindFloat64
   148  				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeBgTime))
   149  			},
   150  		},
   151  		"/cpu/classes/scavenge/total:cpu-seconds": {
   152  			deps: makeStatDepSet(cpuStatsDep),
   153  			compute: func(in *statAggregate, out *metricValue) {
   154  				out.kind = metricKindFloat64
   155  				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeTotalTime))
   156  			},
   157  		},
   158  		"/cpu/classes/total:cpu-seconds": {
   159  			deps: makeStatDepSet(cpuStatsDep),
   160  			compute: func(in *statAggregate, out *metricValue) {
   161  				out.kind = metricKindFloat64
   162  				out.scalar = float64bits(nsToSec(in.cpuStats.TotalTime))
   163  			},
   164  		},
   165  		"/cpu/classes/user:cpu-seconds": {
   166  			deps: makeStatDepSet(cpuStatsDep),
   167  			compute: func(in *statAggregate, out *metricValue) {
   168  				out.kind = metricKindFloat64
   169  				out.scalar = float64bits(nsToSec(in.cpuStats.UserTime))
   170  			},
   171  		},
   172  		"/gc/cycles/automatic:gc-cycles": {
   173  			deps: makeStatDepSet(sysStatsDep),
   174  			compute: func(in *statAggregate, out *metricValue) {
   175  				out.kind = metricKindUint64
   176  				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
   177  			},
   178  		},
   179  		"/gc/cycles/forced:gc-cycles": {
   180  			deps: makeStatDepSet(sysStatsDep),
   181  			compute: func(in *statAggregate, out *metricValue) {
   182  				out.kind = metricKindUint64
   183  				out.scalar = in.sysStats.gcCyclesForced
   184  			},
   185  		},
   186  		"/gc/cycles/total:gc-cycles": {
   187  			deps: makeStatDepSet(sysStatsDep),
   188  			compute: func(in *statAggregate, out *metricValue) {
   189  				out.kind = metricKindUint64
   190  				out.scalar = in.sysStats.gcCyclesDone
   191  			},
   192  		},
   193  		"/gc/scan/globals:bytes": {
   194  			deps: makeStatDepSet(gcStatsDep),
   195  			compute: func(in *statAggregate, out *metricValue) {
   196  				out.kind = metricKindUint64
   197  				out.scalar = in.gcStats.globalsScan
   198  			},
   199  		},
   200  		"/gc/scan/heap:bytes": {
   201  			deps: makeStatDepSet(gcStatsDep),
   202  			compute: func(in *statAggregate, out *metricValue) {
   203  				out.kind = metricKindUint64
   204  				out.scalar = in.gcStats.heapScan
   205  			},
   206  		},
   207  		"/gc/scan/stack:bytes": {
   208  			deps: makeStatDepSet(gcStatsDep),
   209  			compute: func(in *statAggregate, out *metricValue) {
   210  				out.kind = metricKindUint64
   211  				out.scalar = in.gcStats.stackScan
   212  			},
   213  		},
   214  		"/gc/scan/total:bytes": {
   215  			deps: makeStatDepSet(gcStatsDep),
   216  			compute: func(in *statAggregate, out *metricValue) {
   217  				out.kind = metricKindUint64
   218  				out.scalar = in.gcStats.totalScan
   219  			},
   220  		},
   221  		"/gc/heap/allocs-by-size:bytes": {
   222  			deps: makeStatDepSet(heapStatsDep),
   223  			compute: func(in *statAggregate, out *metricValue) {
   224  				hist := out.float64HistOrInit(sizeClassBuckets)
   225  				hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount
   226  				// Cut off the first index which is ostensibly for size class 0,
   227  				// but large objects are tracked separately so it's actually unused.
   228  				for i, count := range in.heapStats.smallAllocCount[1:] {
   229  					hist.counts[i] = count
   230  				}
   231  			},
   232  		},
   233  		"/gc/heap/allocs:bytes": {
   234  			deps: makeStatDepSet(heapStatsDep),
   235  			compute: func(in *statAggregate, out *metricValue) {
   236  				out.kind = metricKindUint64
   237  				out.scalar = in.heapStats.totalAllocated
   238  			},
   239  		},
   240  		"/gc/heap/allocs:objects": {
   241  			deps: makeStatDepSet(heapStatsDep),
   242  			compute: func(in *statAggregate, out *metricValue) {
   243  				out.kind = metricKindUint64
   244  				out.scalar = in.heapStats.totalAllocs
   245  			},
   246  		},
   247  		"/gc/heap/frees-by-size:bytes": {
   248  			deps: makeStatDepSet(heapStatsDep),
   249  			compute: func(in *statAggregate, out *metricValue) {
   250  				hist := out.float64HistOrInit(sizeClassBuckets)
   251  				hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount
   252  				// Cut off the first index which is ostensibly for size class 0,
   253  				// but large objects are tracked separately so it's actually unused.
   254  				for i, count := range in.heapStats.smallFreeCount[1:] {
   255  					hist.counts[i] = count
   256  				}
   257  			},
   258  		},
   259  		"/gc/heap/frees:bytes": {
   260  			deps: makeStatDepSet(heapStatsDep),
   261  			compute: func(in *statAggregate, out *metricValue) {
   262  				out.kind = metricKindUint64
   263  				out.scalar = in.heapStats.totalFreed
   264  			},
   265  		},
   266  		"/gc/heap/frees:objects": {
   267  			deps: makeStatDepSet(heapStatsDep),
   268  			compute: func(in *statAggregate, out *metricValue) {
   269  				out.kind = metricKindUint64
   270  				out.scalar = in.heapStats.totalFrees
   271  			},
   272  		},
   273  		"/gc/heap/goal:bytes": {
   274  			deps: makeStatDepSet(sysStatsDep),
   275  			compute: func(in *statAggregate, out *metricValue) {
   276  				out.kind = metricKindUint64
   277  				out.scalar = in.sysStats.heapGoal
   278  			},
   279  		},
   280  		"/gc/gomemlimit:bytes": {
   281  			compute: func(in *statAggregate, out *metricValue) {
   282  				out.kind = metricKindUint64
   283  				out.scalar = uint64(gcController.memoryLimit.Load())
   284  			},
   285  		},
   286  		"/gc/gogc:percent": {
   287  			compute: func(in *statAggregate, out *metricValue) {
   288  				out.kind = metricKindUint64
   289  				out.scalar = uint64(gcController.gcPercent.Load())
   290  			},
   291  		},
   292  		"/gc/heap/live:bytes": {
   293  			deps: makeStatDepSet(heapStatsDep),
   294  			compute: func(in *statAggregate, out *metricValue) {
   295  				out.kind = metricKindUint64
   296  				out.scalar = gcController.heapMarked
   297  			},
   298  		},
   299  		"/gc/heap/objects:objects": {
   300  			deps: makeStatDepSet(heapStatsDep),
   301  			compute: func(in *statAggregate, out *metricValue) {
   302  				out.kind = metricKindUint64
   303  				out.scalar = in.heapStats.numObjects
   304  			},
   305  		},
   306  		"/gc/heap/tiny/allocs:objects": {
   307  			deps: makeStatDepSet(heapStatsDep),
   308  			compute: func(in *statAggregate, out *metricValue) {
   309  				out.kind = metricKindUint64
   310  				out.scalar = in.heapStats.tinyAllocCount
   311  			},
   312  		},
   313  		"/gc/limiter/last-enabled:gc-cycle": {
   314  			compute: func(_ *statAggregate, out *metricValue) {
   315  				out.kind = metricKindUint64
   316  				out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
   317  			},
   318  		},
   319  		"/gc/pauses:seconds": {
   320  			compute: func(_ *statAggregate, out *metricValue) {
   321  				// N.B. this is identical to /sched/pauses/total/gc:seconds.
   322  				sched.stwTotalTimeGC.write(out)
   323  			},
   324  		},
   325  		"/gc/stack/starting-size:bytes": {
   326  			compute: func(in *statAggregate, out *metricValue) {
   327  				out.kind = metricKindUint64
   328  				out.scalar = uint64(startingStackSize)
   329  			},
   330  		},
   331  		"/memory/classes/heap/free:bytes": {
   332  			deps: makeStatDepSet(heapStatsDep),
   333  			compute: func(in *statAggregate, out *metricValue) {
   334  				out.kind = metricKindUint64
   335  				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
   336  					in.heapStats.inStacks - in.heapStats.inWorkBufs)
   337  			},
   338  		},
   339  		"/memory/classes/heap/objects:bytes": {
   340  			deps: makeStatDepSet(heapStatsDep),
   341  			compute: func(in *statAggregate, out *metricValue) {
   342  				out.kind = metricKindUint64
   343  				out.scalar = in.heapStats.inObjects
   344  			},
   345  		},
   346  		"/memory/classes/heap/released:bytes": {
   347  			deps: makeStatDepSet(heapStatsDep),
   348  			compute: func(in *statAggregate, out *metricValue) {
   349  				out.kind = metricKindUint64
   350  				out.scalar = uint64(in.heapStats.released)
   351  			},
   352  		},
   353  		"/memory/classes/heap/stacks:bytes": {
   354  			deps: makeStatDepSet(heapStatsDep),
   355  			compute: func(in *statAggregate, out *metricValue) {
   356  				out.kind = metricKindUint64
   357  				out.scalar = uint64(in.heapStats.inStacks)
   358  			},
   359  		},
   360  		"/memory/classes/heap/unused:bytes": {
   361  			deps: makeStatDepSet(heapStatsDep),
   362  			compute: func(in *statAggregate, out *metricValue) {
   363  				out.kind = metricKindUint64
   364  				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
   365  			},
   366  		},
   367  		"/memory/classes/metadata/mcache/free:bytes": {
   368  			deps: makeStatDepSet(sysStatsDep),
   369  			compute: func(in *statAggregate, out *metricValue) {
   370  				out.kind = metricKindUint64
   371  				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
   372  			},
   373  		},
   374  		"/memory/classes/metadata/mcache/inuse:bytes": {
   375  			deps: makeStatDepSet(sysStatsDep),
   376  			compute: func(in *statAggregate, out *metricValue) {
   377  				out.kind = metricKindUint64
   378  				out.scalar = in.sysStats.mCacheInUse
   379  			},
   380  		},
   381  		"/memory/classes/metadata/mspan/free:bytes": {
   382  			deps: makeStatDepSet(sysStatsDep),
   383  			compute: func(in *statAggregate, out *metricValue) {
   384  				out.kind = metricKindUint64
   385  				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
   386  			},
   387  		},
   388  		"/memory/classes/metadata/mspan/inuse:bytes": {
   389  			deps: makeStatDepSet(sysStatsDep),
   390  			compute: func(in *statAggregate, out *metricValue) {
   391  				out.kind = metricKindUint64
   392  				out.scalar = in.sysStats.mSpanInUse
   393  			},
   394  		},
   395  		"/memory/classes/metadata/other:bytes": {
   396  			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
   397  			compute: func(in *statAggregate, out *metricValue) {
   398  				out.kind = metricKindUint64
   399  				out.scalar = uint64(in.heapStats.inWorkBufs) + in.sysStats.gcMiscSys
   400  			},
   401  		},
   402  		"/memory/classes/os-stacks:bytes": {
   403  			deps: makeStatDepSet(sysStatsDep),
   404  			compute: func(in *statAggregate, out *metricValue) {
   405  				out.kind = metricKindUint64
   406  				out.scalar = in.sysStats.stacksSys
   407  			},
   408  		},
   409  		"/memory/classes/other:bytes": {
   410  			deps: makeStatDepSet(sysStatsDep),
   411  			compute: func(in *statAggregate, out *metricValue) {
   412  				out.kind = metricKindUint64
   413  				out.scalar = in.sysStats.otherSys
   414  			},
   415  		},
   416  		"/memory/classes/profiling/buckets:bytes": {
   417  			deps: makeStatDepSet(sysStatsDep),
   418  			compute: func(in *statAggregate, out *metricValue) {
   419  				out.kind = metricKindUint64
   420  				out.scalar = in.sysStats.buckHashSys
   421  			},
   422  		},
   423  		"/memory/classes/total:bytes": {
   424  			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
   425  			compute: func(in *statAggregate, out *metricValue) {
   426  				out.kind = metricKindUint64
   427  				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
   428  					in.sysStats.stacksSys + in.sysStats.mSpanSys +
   429  					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
   430  					in.sysStats.gcMiscSys + in.sysStats.otherSys
   431  			},
   432  		},
   433  		"/sched/gomaxprocs:threads": {
   434  			compute: func(_ *statAggregate, out *metricValue) {
   435  				out.kind = metricKindUint64
   436  				out.scalar = uint64(gomaxprocs)
   437  			},
   438  		},
   439  		"/sched/goroutines:goroutines": {
   440  			compute: func(_ *statAggregate, out *metricValue) {
   441  				out.kind = metricKindUint64
   442  				out.scalar = uint64(gcount())
   443  			},
   444  		},
   445  		"/sched/latencies:seconds": {
   446  			compute: func(_ *statAggregate, out *metricValue) {
   447  				sched.timeToRun.write(out)
   448  			},
   449  		},
   450  		"/sched/pauses/stopping/gc:seconds": {
   451  			compute: func(_ *statAggregate, out *metricValue) {
   452  				sched.stwStoppingTimeGC.write(out)
   453  			},
   454  		},
   455  		"/sched/pauses/stopping/other:seconds": {
   456  			compute: func(_ *statAggregate, out *metricValue) {
   457  				sched.stwStoppingTimeOther.write(out)
   458  			},
   459  		},
   460  		"/sched/pauses/total/gc:seconds": {
   461  			compute: func(_ *statAggregate, out *metricValue) {
   462  				sched.stwTotalTimeGC.write(out)
   463  			},
   464  		},
   465  		"/sched/pauses/total/other:seconds": {
   466  			compute: func(_ *statAggregate, out *metricValue) {
   467  				sched.stwTotalTimeOther.write(out)
   468  			},
   469  		},
   470  		"/sync/mutex/wait/total:seconds": {
   471  			compute: func(_ *statAggregate, out *metricValue) {
   472  				out.kind = metricKindFloat64
   473  				out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos()))
   474  			},
   475  		},
   476  	}
   477  
   478  	for _, info := range godebugs.All {
   479  		if !info.Opaque {
   480  			metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
   481  		}
   482  	}
   483  
   484  	metricsInit = true
   485  }
   486  
   487  func compute0(_ *statAggregate, out *metricValue) {
   488  	out.kind = metricKindUint64
   489  	out.scalar = 0
   490  }
   491  
   492  type metricReader func() uint64
   493  
   494  func (f metricReader) compute(_ *statAggregate, out *metricValue) {
   495  	out.kind = metricKindUint64
   496  	out.scalar = f()
   497  }
   498  
// godebug_registerMetric wires a GODEBUG counter's read function into its
// pre-declared /godebug/non-default-behavior/...:events metric, replacing
// the compute0 placeholder installed by initMetrics.
//
//go:linkname godebug_registerMetric internal/godebug.registerMetric
func godebug_registerMetric(name string, read func() uint64) {
	metricsLock()
	initMetrics()
	d, ok := metrics[name]
	if !ok {
		// Registration for a name not created by initMetrics indicates a
		// mismatch with internal/godebugs; crash loudly.
		throw("runtime: unexpected metric registration for " + name)
	}
	d.compute = metricReader(read).compute
	metrics[name] = d
	metricsUnlock()
}
   511  
// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	cpuStatsDep                 // corresponds to cpuStatsAggregate
	gcStatsDep                  // corresponds to gcStatsAggregate
	numStatsDeps                // count of dependency kinds; used as an iteration bound
)
   523  
// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap: bit d of word d/64 is set when
// statDep d is in the set. One word suffices while numStatsDeps <= 64.
type statDepSet [1]uint64
   528  
   529  // makeStatDepSet creates a new statDepSet from a list of statDeps.
   530  func makeStatDepSet(deps ...statDep) statDepSet {
   531  	var s statDepSet
   532  	for _, d := range deps {
   533  		s[d/64] |= 1 << (d % 64)
   534  	}
   535  	return s
   536  }
   537  
   538  // difference returns set difference of s from b as a new set.
   539  func (s statDepSet) difference(b statDepSet) statDepSet {
   540  	var c statDepSet
   541  	for i := range s {
   542  		c[i] = s[i] &^ b[i]
   543  	}
   544  	return c
   545  }
   546  
   547  // union returns the union of the two sets as a new set.
   548  func (s statDepSet) union(b statDepSet) statDepSet {
   549  	var c statDepSet
   550  	for i := range s {
   551  		c[i] = s[i] | b[i]
   552  	}
   553  	return c
   554  }
   555  
   556  // empty returns true if there are no dependencies in the set.
   557  func (s *statDepSet) empty() bool {
   558  	for _, c := range s {
   559  		if c != 0 {
   560  			return false
   561  		}
   562  	}
   563  	return true
   564  }
   565  
   566  // has returns true if the set contains a given statDep.
   567  func (s *statDepSet) has(d statDep) bool {
   568  	return s[d/64]&(1<<(d%64)) != 0
   569  }
   570  
// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta

	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by objects,
	// i.e. totalAllocated minus totalFreed.
	inObjects uint64

	// numObjects is the number of live objects in the heap.
	numObjects uint64

	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64

	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64

	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64

	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}
   603  
   604  // compute populates the heapStatsAggregate with values from the runtime.
   605  func (a *heapStatsAggregate) compute() {
   606  	memstats.heapStats.read(&a.heapStatsDelta)
   607  
   608  	// Calculate derived stats.
   609  	a.totalAllocs = a.largeAllocCount
   610  	a.totalFrees = a.largeFreeCount
   611  	a.totalAllocated = a.largeAlloc
   612  	a.totalFreed = a.largeFree
   613  	for i := range a.smallAllocCount {
   614  		na := a.smallAllocCount[i]
   615  		nf := a.smallFreeCount[i]
   616  		a.totalAllocs += na
   617  		a.totalFrees += nf
   618  		a.totalAllocated += na * uint64(gc.SizeClassToSize[i])
   619  		a.totalFreed += nf * uint64(gc.SizeClassToSize[i])
   620  	}
   621  	a.inObjects = a.totalAllocated - a.totalFreed
   622  	a.numObjects = a.totalAllocs - a.totalFrees
   623  }
   624  
// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because of
// these stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	stacksSys      uint64
	mSpanSys       uint64
	mSpanInUse     uint64
	mCacheSys      uint64
	mCacheInUse    uint64
	buckHashSys    uint64
	gcMiscSys      uint64
	otherSys       uint64
	heapGoal       uint64
	gcCyclesDone   uint64
	gcCyclesForced uint64
}
   645  
// compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute() {
	a.stacksSys = memstats.stacks_sys.load()
	a.buckHashSys = memstats.buckhash_sys.load()
	a.gcMiscSys = memstats.gcMiscSys.load()
	a.otherSys = memstats.other_sys.load()
	a.heapGoal = gcController.heapGoal()
	a.gcCyclesDone = uint64(memstats.numgc)
	a.gcCyclesForced = uint64(memstats.numforcedgc)

	// The mspan/mcache allocator stats are read under the heap lock,
	// which must be acquired on the system stack.
	systemstack(func() {
		lock(&mheap_.lock)
		a.mSpanSys = memstats.mspan_sys.load()
		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		a.mCacheSys = memstats.mcache_sys.load()
		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		unlock(&mheap_.lock)
	})
}
   665  
// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats
}
   671  
// compute populates the cpuStatsAggregate with values from the runtime.
//
// Note that this takes a snapshot of the stats as of the last
// stop-the-world; it does not re-accumulate up to the present.
func (a *cpuStatsAggregate) compute() {
	a.cpuStats = work.cpuStats
	// TODO(mknyszek): Update the CPU stats again so that we're not
	// just relying on the STW snapshot. The issue here is that currently
	// this will cause non-monotonicity in the "user" CPU time metric.
	//
	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
}
   681  
// gcStatsAggregate represents various GC stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type gcStatsAggregate struct {
	heapScan    uint64
	stackScan   uint64
	globalsScan uint64
	// totalScan is the sum of the three scan quantities above.
	totalScan uint64
}
   690  
   691  // compute populates the gcStatsAggregate with values from the runtime.
   692  func (a *gcStatsAggregate) compute() {
   693  	a.heapScan = gcController.heapScan.Load()
   694  	a.stackScan = gcController.lastStackScan.Load()
   695  	a.globalsScan = gcController.globalsScan.Load()
   696  	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
   697  }
   698  
   699  // nsToSec takes a duration in nanoseconds and converts it to seconds as
   700  // a float64.
   701  func nsToSec(ns int64) float64 {
   702  	return float64(ns) / 1e9
   703  }
   704  
// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	// ensured records which of the aggregates below are already populated.
	ensured   statDepSet
	heapStats heapStatsAggregate
	sysStats  sysStatsAggregate
	cpuStats  cpuStatsAggregate
	gcStats   gcStatsAggregate
}
   717  
   718  // ensure populates statistics aggregates determined by deps if they
   719  // haven't yet been populated.
   720  func (a *statAggregate) ensure(deps *statDepSet) {
   721  	missing := deps.difference(a.ensured)
   722  	if missing.empty() {
   723  		return
   724  	}
   725  	for i := statDep(0); i < numStatsDeps; i++ {
   726  		if !missing.has(i) {
   727  			continue
   728  		}
   729  		switch i {
   730  		case heapStatsDep:
   731  			a.heapStats.compute()
   732  		case sysStatsDep:
   733  			a.sysStats.compute()
   734  		case cpuStatsDep:
   735  			a.cpuStats.compute()
   736  		case gcStatsDep:
   737  			a.gcStats.compute()
   738  		}
   739  	}
   740  	a.ensured = a.ensured.union(missing)
   741  }
   742  
// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)
   755  
// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}
   762  
// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}
   770  
   771  // float64HistOrInit tries to pull out an existing float64Histogram
   772  // from the value, but if none exists, then it allocates one with
   773  // the given buckets.
   774  func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
   775  	var hist *metricFloat64Histogram
   776  	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
   777  		hist = (*metricFloat64Histogram)(v.pointer)
   778  	} else {
   779  		v.kind = metricKindFloat64Histogram
   780  		hist = new(metricFloat64Histogram)
   781  		v.pointer = unsafe.Pointer(hist)
   782  	}
   783  	hist.buckets = buckets
   784  	if len(hist.counts) != len(hist.buckets)-1 {
   785  		hist.counts = make([]uint64, len(buckets)-1)
   786  	}
   787  	return hist
   788  }
   789  
// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64
	buckets []float64
}
   796  
// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
var agg statAggregate
   803  
// metricName pairs a metric's name with its value kind.
type metricName struct {
	name string
	kind metricKind
}
   808  
// readMetricNames is the implementation of runtime/metrics.readMetricNames,
// used by the runtime/metrics test and otherwise unreferenced.
//
//go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
func readMetricNames() []string {
	metricsLock()
	initMetrics()
	n := len(metrics)
	metricsUnlock()

	// Allocate outside the critical section; the lock is dropped and
	// retaken so the semaphore isn't held across the allocation.
	list := make([]string, 0, n)

	metricsLock()
	// NOTE: metrics may have grown between the two critical sections
	// (via godebug_registerMetric); append handles the extra entries.
	for name := range metrics {
		list = append(list, name)
	}
	metricsUnlock()

	return list
}
   829  
// readMetrics is the implementation of runtime/metrics.Read.
//
// samplesp/len/cap describe a []metricSample owned by the caller.
//
//go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	metricsLock()

	// Ensure the map is initialized.
	initMetrics()

	// Read the metrics.
	readMetricsLocked(samplesp, len, cap)
	metricsUnlock()
}
   843  
// readMetricsLocked is the internal, locked portion of readMetrics.
//
// Broken out for more robust testing. metricsLock must be held and
// initMetrics must have been called already.
func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int) {
	// Construct a slice from the args.
	//
	// The caller hands over a raw pointer/len/cap triple rather than a
	// []metricSample so runtime/metrics types need not be linked in here.
	sl := slice{samplesp, len, cap}
	samples := *(*[]metricSample)(unsafe.Pointer(&sl))

	// Clear agg defensively.
	agg = statAggregate{}

	// Sample.
	for i := range samples {
		sample := &samples[i]
		data, ok := metrics[sample.name]
		if !ok {
			// Unknown names get KindBad rather than an error.
			sample.value.kind = metricKindBad
			continue
		}
		// Ensure we have all the stats we need.
		// agg is populated lazily.
		agg.ensure(&data.deps)

		// Compute the value based on the stats we have.
		data.compute(&agg, &sample.value)
	}
}
   872  

View as plain text