Source file src/runtime/mcache.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
type mcache struct {
	_ sys.NotInHeap

	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	nextSample  int64   // trigger heap sample after allocating this many bytes
	memProfRate int     // cached mem profile rate, used to detect changes
	scanAlloc   uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	//
	// tinyAllocs is the number of tiny allocations performed
	// by the P that owns this mcache.
	tiny       uintptr
	tinyoffset uintptr
	tinyAllocs uintptr

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen atomic.Uint32
}
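
// The alloc array above is indexed by spanClass, which packs a size
// class together with a noscan bit (see makeSpanClass in mheap.go).
// A minimal sketch of that encoding, assuming the same layout:
//
//	func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
//		sc := spanClass(sizeclass) << 1
//		if noscan {
//			sc |= 1
//		}
//		return sc
//	}
//
// so the mcache holds two cached spans per size class: one for objects
// with pointers and one for pointer-free (noscan) objects.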

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}
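
// Illustrative sketch (hypothetical helper, not part of the runtime):
// walking a free list through gclinkptr values. The *gclink form is only
// held transiently, so the GC never needs to trace these pointers:
//
//	func countNodes(l gclinkptr) int {
//		n := 0
//		for l != 0 {
//			n++
//			l = l.ptr().next
//		}
//		return n
//	}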

type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}

// dummy mspan that contains no free objects.
var emptymspan mspan

func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen.Store(mheap_.sweepgen)
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.nextSample = nextSample()
	return c
}
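
// Every entry of c.alloc starts out as emptymspan, a shared dummy span
// with no free slots (allocCount == nelems == 0), so the first allocation
// for each span class falls through to refill without needing a nil
// check on the malloc fast path.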

// freemcache releases resources associated with this
// mcache and puts the object onto a free list.
//
// Some resources, such as statistics, cannot simply be
// released, so releaseAll flushes them to the global
// state before the mcache itself is freed.
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// getMCache is a convenience function which tries to obtain an mcache.
//
// Returns nil if we don't have a P and bootstrapping is complete (at
// that point mcache0 has been cleared by procresize). The caller's P
// must not change, so we must be in a non-preemptible state.
func getMCache(mp *m) *mcache {
	// Grab the mcache, since that's where stats live.
	pp := mp.p.ptr()
	var c *mcache
	if pp == nil {
		// We will be called without a P while bootstrapping,
		// in which case we use mcache0, which is set in mallocinit.
		// mcache0 is cleared when bootstrapping is complete,
		// by procresize.
		c = mcache0
	} else {
		c = pp.mcache
	}
	return c
}
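
// A sketch of the typical call pattern (illustrative, not a fixed
// contract): callers pin themselves with acquirem so the P cannot
// change while the mcache is in use:
//
//	mp := acquirem()
//	c := getMCache(mp)
//	if c == nil {
//		throw("no mcache available")
//	}
//	// ... allocate using c ...
//	releasem(mp)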

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if s.allocCount != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		mheap_.central[spc].mcentral.uncacheSpan(s)

		// Count up how many slots were used and record it.
		stats := memstats.heapStats.acquire()
		slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
		atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)

		// Flush tinyAllocs.
		if spc == tinySpanClass {
			atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
			c.tinyAllocs = 0
		}
		memstats.heapStats.release()

		// Count the allocs in inconsistent, internal stats.
		bytesAllocated := slotsUsed * int64(s.elemsize)
		gcController.totalAlloc.Add(bytesAllocated)

		// Clear the second allocCount just to be safe.
		s.allocCountBeforeCache = 0
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if s.allocCount == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	// Store the current alloc count for accounting later.
	s.allocCountBeforeCache = s.allocCount

	// Update heapLive and flush scanAlloc.
	//
	// We have not yet allocated anything new into the span, but we
	// assume that all of its slots will get used, so this makes
	// heapLive an overestimate.
	//
	// When the span gets uncached, we'll fix up this overestimate
	// if necessary (see releaseAll).
	//
	// We pick an overestimate here because an underestimate leads
	// the pacer to believe that it's in better shape than it is,
	// which appears to lead to more memory used. See #53738 for
	// more details.
	usedBytes := uintptr(s.allocCount) * s.elemsize
	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
	c.scanAlloc = 0

	c.alloc[spc] = s
}
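
// Worked sketch of the heapLive update above (illustrative numbers,
// assuming an 8 KiB page): caching a one-page span of 48-byte elements
// with allocCount == 10 credits heapLive with
//
//	1*8192 - 10*48 = 7712 bytes,
//
// counting every free slot as if it will be used. releaseAll later
// subtracts whatever was not actually allocated.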

// allocLarge allocates a span for a large object.
func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	spc := makeSpanClass(0, noscan)
	s := mheap_.alloc(npages, spc)
	if s == nil {
		throw("out of memory")
	}

	// Count the alloc in consistent, external stats.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
	atomic.Xadd64(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Count the alloc in inconsistent, internal stats.
	gcController.totalAlloc.Add(int64(npages * pageSize))

	// Update heapLive.
	gcController.update(int64(s.npages*pageSize), 0)

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
	s.limit = s.base() + size
	s.initHeapBits()
	return s
}
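
// A minimal sketch of the page rounding above (hypothetical helper,
// assuming the usual 8 KiB page size, _PageSize == 1<<_PageShift):
//
//	func pagesNeeded(size, pageSize uintptr) uintptr {
//		npages := size / pageSize
//		if size%pageSize != 0 {
//			npages++
//		}
//		return npages
//	}
//
// For example, pagesNeeded(40000, 8192) == 5, since 4*8192 = 32768 falls
// short of 40000 but 5*8192 = 40960 covers it.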

func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	scanAlloc := int64(c.scanAlloc)
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	dHeapLive := int64(0)
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
			s.allocCountBeforeCache = 0

			// Adjust smallAllocCount for whatever was allocated.
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
			memstats.heapStats.release()

			// Adjust the actual allocs in inconsistent, internal stats.
			// We assumed earlier that the full span gets allocated.
			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))

			if s.sweepgen != sg+1 {
				// refill conservatively counted unallocated slots in gcController.heapLive.
				// Undo this.
				//
				// If this span was cached before sweep, then gcController.heapLive was totally
				// recomputed since caching this span, so we don't do this for stale spans.
				dHeapLive -= int64(s.nelems-s.allocCount) * int64(s.elemsize)
			}

			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0

	// Flush tinyAllocs.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
	c.tinyAllocs = 0
	memstats.heapStats.release()

	// Update heapLive and heapScan.
	gcController.update(dHeapLive, scanAlloc)
}
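
// Continuing the refill sketch (same illustrative numbers): if that
// one-page span of 48-byte elements comes back with allocCount still 10
// of nelems == 170, the loop above subtracts
//
//	(170 - 10) * 48 = 7680 bytes
//
// from heapLive, undoing most of refill's 7712-byte overestimate; the
// remaining 32 bytes are the span's unusable tail (8192 - 170*48).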

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	flushGen := c.flushGen.Load()
	if flushGen == sg {
		return
	} else if flushGen != sg-2 {
		println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart
}
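
// For reference, the sweepgen states that the flushGen check above and
// the sweepgen checks in refill rely on (paraphrasing the mspan
// documentation in mheap.go; mheap_.sweepgen advances by 2 each cycle):
//
//	sweepgen == mheap_.sweepgen - 2: the span needs sweeping
//	sweepgen == mheap_.sweepgen - 1: the span is currently being swept
//	sweepgen == mheap_.sweepgen:     the span is swept and ready to use
//	sweepgen == mheap_.sweepgen + 1: the span was cached before sweep
//		began and still needs sweeping
//	sweepgen == mheap_.sweepgen + 3: the span was swept and then cached
//		and is still cached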