Source file src/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral holds two pairs of mspan sets: spans with free objects
// (c.partial) and spans that are completely allocated (c.full).

package runtime

import (
	"internal/runtime/atomic"
	"internal/runtime/sys"
)

// Central list of free objects of a given size.
type mcentral struct {
	_         sys.NotInHeap
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}
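
// Illustrative sketch (not part of the runtime source): since sweepgen
// advances by 2 per GC cycle, the index sweepgen/2%2 alternates between
// 0 and 1, rotating the swept and unswept roles of the two sets:
//
//	sweepgen = 2: swept spans in partial[1], unswept spans in partial[0]
//	sweepgen = 4: swept spans in partial[0], unswept spans in partial[1]
//	sweepgen = 6: swept spans in partial[1], unswept spans in partial[0]
//
// The same rotation applies to full[0] and full[1].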

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
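
// A worked identity (not part of the runtime source): the accessors above
// interlock across GC cycles. For any even sweepgen g,
//
//	c.partialSwept(g) == c.partialUnswept(g+2)
//	c.fullSwept(g)    == c.fullUnswept(g+2)
//
// so spans left on a swept set simply become the unswept set of the next
// cycle; no spans ever need to be moved when sweepgen advances.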

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)
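	// deductSweepCredit is part of proportional sweeping: allocation
	// pays for sweep work so that all spans are swept before the next
	// GC cycle begins. The call above may therefore sweep other spans
	// to cover the spanBytes about to be allocated.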

	traceDone := false
	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepStart()
		traceRelease(trace)
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100
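	// Rough arithmetic behind the 1% figure above (illustrative): giving
	// up after 100 fruitless sweeps means at most one fresh span is
	// allocated per 100 fully-used spans examined, so the extra memory
	// taken this way is bounded by about 1/100 = 1% of the memory already
	// in use for this size class.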

	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	trace = traceAcquire()
	if trace.ok() {
		trace.GCSweepDone()
		traceDone = true
		traceRelease(trace)
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if !traceDone {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCSweepDone()
			traceRelease(trace)
		}
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || s.allocCount == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
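
// Illustrative sketch (not part of the runtime source): the allocCache
// fix-up at the end of cacheSpan, worked for a hypothetical freeindex of
// 131. allocCache holds a 64-bit window of the complemented allocBits
// bitmap, starting at a 64-aligned object index:
//
//	s.freeindex  = 131
//	freeByteBase = 131 &^ 63 = 128  // window start, in object indexes
//	whichByte    = 128 / 8   = 16   // byte offset of the window in allocBits
//	shift        = 131 % 64  = 3    // bits to drop below freeindex
//
// After the shift, bit 0 of s.allocCache describes the object at
// s.freeindex, so a count of trailing zero bits finds the next free slot.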

// Return a span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty (that is, it must
// contain at least one allocated object).
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1

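	// For reference, the sweepgen encoding on mspan (documented in
	// mheap.go): s.sweepgen == sg means swept and ready to use, sg-2
	// means the span needs sweeping, sg-1 means it is currently being
	// swept, and sg+1 means it was cached before sweeping began and
	// still needs sweeping. stale therefore means a GC cycle started
	// while this span sat in an mcache.
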
	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	s.initHeapBits()
	return s
}
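
// A worked example (not part of the runtime source): grow's element
// count for a hypothetical one-page (8 KiB) span holding 48-byte
// objects:
//
//	n       = 8192 / 48 = 170    // objects per span; 32 bytes of tail waste
//	s.limit = s.base() + 48*170  // first byte past the last object
//
// divideByElemSize performs this division with a precomputed reciprocal
// (a multiply and a shift) rather than a hardware divide, which is what
// the comment above means by "division by multiplication and shifts".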