Source file src/runtime/mgcscavenge_test.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"fmt"
     9  	"internal/goos"
    10  	"internal/runtime/atomic"
    11  	"math"
    12  	"math/rand"
    13  	. "runtime"
    14  	"testing"
    15  	"time"
    16  )
    17  
    18  // makePallocData produces an initialized PallocData by setting
    19  // the ranges described in alloc and scavenged.
    20  func makePallocData(alloc, scavenged []BitRange) *PallocData {
    21  	b := new(PallocData)
    22  	for _, v := range alloc {
    23  		if v.N == 0 {
    24  			// Skip N==0. It's harmless and allocRange doesn't
    25  			// handle this case.
    26  			continue
    27  		}
    28  		b.AllocRange(v.I, v.N)
    29  	}
    30  	for _, v := range scavenged {
    31  		if v.N == 0 {
    32  			// See the previous loop.
    33  			continue
    34  		}
    35  		b.ScavengedSetRange(v.I, v.N)
    36  	}
    37  	return b
    38  }
    39  
    40  func TestFillAligned(t *testing.T) {
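        	// fillAlignedSlow is a simple reference implementation of FillAligned:
        	// for each m-aligned group of m bits in x, the whole group is set in
        	// the result if any bit within that group is set.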
    41  	fillAlignedSlow := func(x uint64, m uint) uint64 {
    42  		if m == 1 {
    43  			return x
    44  		}
    45  		out := uint64(0)
    46  		for i := uint(0); i < 64; i += m {
    47  			for j := uint(0); j < m; j++ {
    48  				if x&(uint64(1)<<(i+j)) != 0 {
    49  					out |= ((uint64(1) << m) - 1) << i
    50  					break
    51  				}
    52  			}
    53  		}
    54  		return out
    55  	}
    56  	check := func(x uint64, m uint) {
    57  		want := fillAlignedSlow(x, m)
    58  		if got := FillAligned(x, m); got != want {
    59  			t.Logf("got:  %064b", got)
    60  			t.Logf("want: %064b", want)
    61  			t.Errorf("bad fillAligned(%016x, %d)", x, m)
    62  		}
    63  	}
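        	// Check every power-of-two alignment from 1 bit up to a full 64-bit word.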
    64  	for m := uint(1); m <= 64; m *= 2 {
    65  		tests := []uint64{
    66  			0x0000000000000000,
    67  			0x00000000ffffffff,
    68  			0xffffffff00000000,
    69  			0x8000000000000001,
    70  			0xf00000000000000f,
    71  			0xf00000010050000f,
    72  			0xffffffffffffffff,
    73  			0x0000000000000001,
    74  			0x0000000000000002,
    75  			0x0000000000000008,
    76  			uint64(1) << (m - 1),
    77  			uint64(1) << m,
    78  			// Try a few fixed arbitrary examples.
    79  			0xb02b9effcf137016,
    80  			0x3975a076a9fbff18,
    81  			0x0f8c88ec3b81506e,
    82  			0x60f14d80ef2fa0e6,
    83  		}
    84  		for _, test := range tests {
    85  			check(test, m)
    86  		}
    87  		for i := 0; i < 1000; i++ {
    88  			// Try a pseudo-random number.
    89  			check(rand.Uint64(), m)
    90  
    91  			if m > 1 {
    92  				// For m != 1, let's construct a slightly more interesting
    93  				// random test. Generate a bitmap which is either 0 or
    94  				// randomly set bits for each m-aligned group of m bits.
    95  				val := uint64(0)
    96  				for n := uint(0); n < 64; n += m {
    97  					// For each group of m bits, flip a coin:
    98  					// * Leave them as zero.
    99  					// * Set them randomly.
   100  					if rand.Uint64()%2 == 0 {
   101  						val |= (rand.Uint64() & ((1 << m) - 1)) << n
   102  					}
   103  				}
   104  				check(val, m)
   105  			}
   106  		}
   107  	}
   108  }
   109  
   110  func TestPallocDataFindScavengeCandidate(t *testing.T) {
   111  	type test struct {
   112  		alloc, scavenged []BitRange
   113  		min, max         uintptr
   114  		want             BitRange
   115  	}
   116  	tests := map[string]test{
   117  		"MixedMin1": {
   118  			alloc:     []BitRange{{0, 40}, {42, PallocChunkPages - 42}},
   119  			scavenged: []BitRange{{0, 41}, {42, PallocChunkPages - 42}},
   120  			min:       1,
   121  			max:       PallocChunkPages,
   122  			want:      BitRange{41, 1},
   123  		},
   124  	}
   125  	if PallocChunkPages >= 512 {
   126  		// avoid constant overflow when PallocChunkPages is small
   127  		var pallocChunkPages uint = PallocChunkPages
   128  		tests["MultiMin1"] = test{
   129  			alloc:     []BitRange{{0, 63}, {65, 20}, {87, pallocChunkPages - 87}},
   130  			scavenged: []BitRange{{86, 1}},
   131  			min:       1,
   132  			max:       PallocChunkPages,
   133  			want:      BitRange{85, 1},
   134  		}
   135  	}
   136  	// Try out different page minimums.
   137  	for m := uintptr(1); m <= 64; m *= 2 {
   138  		suffix := fmt.Sprintf("Min%d", m)
   139  		tests["AllFree"+suffix] = test{
   140  			min:  m,
   141  			max:  PallocChunkPages,
   142  			want: BitRange{0, PallocChunkPages},
   143  		}
   144  		tests["AllScavenged"+suffix] = test{
   145  			scavenged: []BitRange{{0, PallocChunkPages}},
   146  			min:       m,
   147  			max:       PallocChunkPages,
   148  			want:      BitRange{0, 0},
   149  		}
   150  		tests["NoneFree"+suffix] = test{
   151  			alloc:     []BitRange{{0, PallocChunkPages}},
   152  			scavenged: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}},
   153  			min:       m,
   154  			max:       PallocChunkPages,
   155  			want:      BitRange{0, 0},
   156  		}
   157  		tests["StartFree"+suffix] = test{
   158  			alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}},
   159  			min:   m,
   160  			max:   PallocChunkPages,
   161  			want:  BitRange{0, uint(m)},
   162  		}
   163  		tests["EndFree"+suffix] = test{
   164  			alloc: []BitRange{{0, PallocChunkPages - uint(m)}},
   165  			min:   m,
   166  			max:   PallocChunkPages,
   167  			want:  BitRange{PallocChunkPages - uint(m), uint(m)},
   168  		}
   169  		if PallocChunkPages >= 512 {
   170  			tests["Straddle64"+suffix] = test{
   171  				alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
   172  				min:   m,
   173  				max:   2 * m,
   174  				want:  BitRange{64 - uint(m), 2 * uint(m)},
   175  			}
   176  			tests["BottomEdge64WithFull"+suffix] = test{
   177  				alloc:     []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
   178  				scavenged: []BitRange{{1, 10}},
   179  				min:       m,
   180  				max:       3 * m,
   181  				want:      BitRange{128, 3 * uint(m)},
   182  			}
   183  			tests["BottomEdge64WithPocket"+suffix] = test{
   184  				alloc:     []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
   185  				scavenged: []BitRange{{1, 10}},
   186  				min:       m,
   187  				max:       3 * m,
   188  				want:      BitRange{128, 3 * uint(m)},
   189  			}
   190  		}
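        		// Even with max set to 0 we still expect a candidate of min pages,
        		// since a max smaller than min behaves as if max == min.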
   191  		tests["Max0"+suffix] = test{
   192  			scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
   193  			min:       m,
   194  			max:       0,
   195  			want:      BitRange{PallocChunkPages - uint(m), uint(m)},
   196  		}
   197  		if m <= 8 {
   198  			tests["OneFree"] = test{
   199  				alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
   200  				min:   m,
   201  				max:   PallocChunkPages,
   202  				want:  BitRange{40, uint(m)},
   203  			}
   204  			tests["OneScavenged"] = test{
   205  				alloc:     []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
   206  				scavenged: []BitRange{{40, 1}},
   207  				min:       m,
   208  				max:       PallocChunkPages,
   209  				want:      BitRange{0, 0},
   210  			}
   211  		}
   212  		if m > 1 {
   213  			if PallocChunkPages >= m*2 {
   214  				tests["MaxUnaligned"+suffix] = test{
   215  					scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
   216  					min:       m,
   217  					max:       m - 2,
   218  					want:      BitRange{PallocChunkPages - uint(m), uint(m)},
   219  				}
   220  			}
   221  			if PallocChunkPages >= 512 {
   222  				// avoid constant overflow when PallocChunkPages is small
   223  				var PallocChunkPages uint = PallocChunkPages
   224  				tests["SkipSmall"+suffix] = test{
   225  					alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
   226  					min:   m,
   227  					max:   m,
   228  					want:  BitRange{64 - uint(m), uint(m)},
   229  				}
   230  				tests["SkipMisaligned"+suffix] = test{
   231  					alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
   232  					min:   m,
   233  					max:   m,
   234  					want:  BitRange{64 - uint(m), uint(m)},
   235  				}
   236  			}
   237  			tests["MaxLessThan"+suffix] = test{
   238  				scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
   239  				min:       m,
   240  				max:       1,
   241  				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
   242  			}
   243  		}
   244  	}
   245  	if PhysHugePageSize > uintptr(PageSize) {
   246  		// Check hugepage preserving behavior.
   247  		bits := uint(PhysHugePageSize / uintptr(PageSize))
   248  		if bits < PallocChunkPages {
   249  			tests["PreserveHugePageBottom"] = test{
   250  				alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}},
   251  				min:   1,
   252  				max:   3, // Make it so that max would have us try to break the huge page.
   253  				want:  BitRange{0, bits + 2},
   254  			}
   255  			if 3*bits < PallocChunkPages {
   256  				// We need at least 3 huge pages in a chunk for this test to make sense.
   257  				tests["PreserveHugePageMiddle"] = test{
   258  					alloc: []BitRange{{0, bits - 10}, {2*bits + 10, PallocChunkPages - (2*bits + 10)}},
   259  					min:   1,
   260  					max:   12, // Make it so that max would have us try to break the huge page.
   261  					want:  BitRange{bits, bits + 10},
   262  				}
   263  			}
   264  			tests["PreserveHugePageTop"] = test{
   265  				alloc: []BitRange{{0, PallocChunkPages - bits}},
   266  				min:   1,
   267  				max:   1, // Even one page would break a huge page in this case.
   268  				want:  BitRange{PallocChunkPages - bits, bits},
   269  			}
   270  		} else if bits == PallocChunkPages {
   271  			tests["PreserveHugePageAll"] = test{
   272  				min:  1,
   273  				max:  1, // Even one page would break a huge page in this case.
   274  				want: BitRange{0, PallocChunkPages},
   275  			}
   276  		} else {
   277  			// The huge page size is greater than pallocChunkPages, so it should
   278  			// be effectively disabled. There's no way we can possibly scavenge
   279  			// a huge page out of this bitmap chunk.
   280  			tests["PreserveHugePageNone"] = test{
   281  				min:  1,
   282  				max:  1,
   283  				want: BitRange{PallocChunkPages - 1, 1},
   284  			}
   285  		}
   286  	}
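        	// Run each case against a fresh PallocData. Empty candidates (N == 0)
        	// compare equal regardless of their start index.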
   287  	for name, v := range tests {
   288  		t.Run(name, func(t *testing.T) {
   289  			b := makePallocData(v.alloc, v.scavenged)
   290  			start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max)
   291  			got := BitRange{start, size}
   292  			if !(got.N == 0 && v.want.N == 0) && got != v.want {
   293  				t.Fatalf("candidate mismatch: got %v, want %v", got, v.want)
   294  			}
   295  		})
   296  	}
   297  }
   298  
   299  // Tests end-to-end scavenging on a pageAlloc.
   300  func TestPageAllocScavenge(t *testing.T) {
   301  	if GOOS == "openbsd" && testing.Short() {
   302  		t.Skip("skipping because virtual memory is limited; see #36210")
   303  	}
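        	// Each test describes one Scavenge call: request is the number of bytes
        	// asked for (^uintptr(0) effectively means "scavenge everything") and
        	// expect is the number of bytes we expect that call to release.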
   304  	type test struct {
   305  		request, expect uintptr
   306  	}
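        	// minPages is the smallest number of runtime pages that can be
        	// scavenged at once: one physical page's worth.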
   307  	minPages := PhysPageSize / PageSize
   308  	if minPages < 1 {
   309  		minPages = 1
   310  	}
   311  	type setup struct {
   312  		beforeAlloc map[ChunkIdx][]BitRange
   313  		beforeScav  map[ChunkIdx][]BitRange
   314  		expect      []test
   315  		afterScav   map[ChunkIdx][]BitRange
   316  	}
   317  	tests := map[string]setup{
   318  		"AllFreeUnscavExhaust": {
   319  			beforeAlloc: map[ChunkIdx][]BitRange{
   320  				BaseChunkIdx:     {},
   321  				BaseChunkIdx + 1: {},
   322  				BaseChunkIdx + 2: {},
   323  			},
   324  			beforeScav: map[ChunkIdx][]BitRange{
   325  				BaseChunkIdx:     {},
   326  				BaseChunkIdx + 1: {},
   327  				BaseChunkIdx + 2: {},
   328  			},
   329  			expect: []test{
   330  				{^uintptr(0), 3 * PallocChunkPages * PageSize},
   331  			},
   332  			afterScav: map[ChunkIdx][]BitRange{
   333  				BaseChunkIdx:     {{0, PallocChunkPages}},
   334  				BaseChunkIdx + 1: {{0, PallocChunkPages}},
   335  				BaseChunkIdx + 2: {{0, PallocChunkPages}},
   336  			},
   337  		},
   338  		"NoneFreeUnscavExhaust": {
   339  			beforeAlloc: map[ChunkIdx][]BitRange{
   340  				BaseChunkIdx:     {{0, PallocChunkPages}},
   341  				BaseChunkIdx + 1: {},
   342  				BaseChunkIdx + 2: {{0, PallocChunkPages}},
   343  			},
   344  			beforeScav: map[ChunkIdx][]BitRange{
   345  				BaseChunkIdx:     {},
   346  				BaseChunkIdx + 1: {{0, PallocChunkPages}},
   347  				BaseChunkIdx + 2: {},
   348  			},
   349  			expect: []test{
   350  				{^uintptr(0), 0},
   351  			},
   352  			afterScav: map[ChunkIdx][]BitRange{
   353  				BaseChunkIdx:     {},
   354  				BaseChunkIdx + 1: {{0, PallocChunkPages}},
   355  				BaseChunkIdx + 2: {},
   356  			},
   357  		},
   358  		"ScavHighestPageFirst": {
   359  			beforeAlloc: map[ChunkIdx][]BitRange{
   360  				BaseChunkIdx: {},
   361  			},
   362  			beforeScav: map[ChunkIdx][]BitRange{
   363  				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
   364  			},
   365  			expect: []test{
   366  				{1, minPages * PageSize},
   367  			},
   368  			afterScav: map[ChunkIdx][]BitRange{
   369  				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},
   370  			},
   371  		},
   372  		"ScavMultiple": {
   373  			beforeAlloc: map[ChunkIdx][]BitRange{
   374  				BaseChunkIdx: {},
   375  			},
   376  			beforeScav: map[ChunkIdx][]BitRange{
   377  				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
   378  			},
   379  			expect: []test{
   380  				{minPages * PageSize, minPages * PageSize},
   381  				{minPages * PageSize, minPages * PageSize},
   382  			},
   383  			afterScav: map[ChunkIdx][]BitRange{
   384  				BaseChunkIdx: {{0, PallocChunkPages}},
   385  			},
   386  		},
   387  		"ScavMultiple2": {
   388  			beforeAlloc: map[ChunkIdx][]BitRange{
   389  				BaseChunkIdx:     {},
   390  				BaseChunkIdx + 1: {},
   391  			},
   392  			beforeScav: map[ChunkIdx][]BitRange{
   393  				BaseChunkIdx:     {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
   394  				BaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},
   395  			},
   396  			expect: []test{
   397  				{2 * minPages * PageSize, 2 * minPages * PageSize},
   398  				{minPages * PageSize, minPages * PageSize},
   399  				{minPages * PageSize, minPages * PageSize},
   400  			},
   401  			afterScav: map[ChunkIdx][]BitRange{
   402  				BaseChunkIdx:     {{0, PallocChunkPages}},
   403  				BaseChunkIdx + 1: {{0, PallocChunkPages}},
   404  			},
   405  		},
   406  		"ScavDiscontiguous": {
   407  			beforeAlloc: map[ChunkIdx][]BitRange{
   408  				BaseChunkIdx:       {},
   409  				BaseChunkIdx + 0xe: {},
   410  			},
   411  			beforeScav: map[ChunkIdx][]BitRange{
   412  				BaseChunkIdx:       {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
   413  				BaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}},
   414  			},
   415  			expect: []test{
   416  				{2 * minPages * PageSize, 2 * minPages * PageSize},
   417  				{^uintptr(0), 2 * minPages * PageSize},
   418  				{^uintptr(0), 0},
   419  			},
   420  			afterScav: map[ChunkIdx][]BitRange{
   421  				BaseChunkIdx:       {{0, PallocChunkPages}},
   422  				BaseChunkIdx + 0xe: {{0, PallocChunkPages}},
   423  			},
   424  		},
   425  	}
   426  	// Disable these tests on iOS since we have a small address space.
   427  	// See #46860.
   428  	if PageAlloc64Bit != 0 && goos.IsIos == 0 {
   429  		tests["ScavAllVeryDiscontiguous"] = setup{
   430  			beforeAlloc: map[ChunkIdx][]BitRange{
   431  				BaseChunkIdx:          {},
   432  				BaseChunkIdx + 0x1000: {},
   433  			},
   434  			beforeScav: map[ChunkIdx][]BitRange{
   435  				BaseChunkIdx:          {},
   436  				BaseChunkIdx + 0x1000: {},
   437  			},
   438  			expect: []test{
   439  				{^uintptr(0), 2 * PallocChunkPages * PageSize},
   440  				{^uintptr(0), 0},
   441  			},
   442  			afterScav: map[ChunkIdx][]BitRange{
   443  				BaseChunkIdx:          {{0, PallocChunkPages}},
   444  				BaseChunkIdx + 0x1000: {{0, PallocChunkPages}},
   445  			},
   446  		}
   447  	}
   448  	for name, v := range tests {
   449  		t.Run(name, func(t *testing.T) {
   450  			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
   451  			defer FreePageAlloc(b)
   452  
   453  			for iter, h := range v.expect {
   454  				if got := b.Scavenge(h.request); got != h.expect {
   455  					t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
   456  				}
   457  			}
   458  			want := NewPageAlloc(v.beforeAlloc, v.afterScav)
   459  			defer FreePageAlloc(want)
   460  
   461  			checkPageAlloc(t, want, b)
   462  		})
   463  	}
   464  }
   465  
   466  func TestScavenger(t *testing.T) {
   467  	// workedTime is a standard conversion of bytes of scavenge
   468  	// work to time elapsed.
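        	// It charges 10µs per 4 KiB page of work, rounding up to whole pages.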
   469  	workedTime := func(bytes uintptr) int64 {
   470  		return int64((bytes+4095)/4096) * int64(10*time.Microsecond)
   471  	}
   472  
   473  	// Set up a bunch of state that we're going to track and verify
   474  	// throughout the test.
   475  	totalWork := uint64(64<<20 - 3*PhysPageSize)
   476  	var totalSlept, totalWorked atomic.Int64
   477  	var availableWork atomic.Uint64
   478  	var stopAt atomic.Uint64 // How much available work to stop at.
   479  
   480  	// Set up the scavenger.
   481  	var s Scavenger
   482  	s.Sleep = func(ns int64) int64 {
   483  		totalSlept.Add(ns)
   484  		return ns
   485  	}
   486  	s.Scavenge = func(bytes uintptr) (uintptr, int64) {
   487  		avail := availableWork.Load()
   488  		if uint64(bytes) > avail {
   489  			bytes = uintptr(avail)
   490  		}
   491  		t := workedTime(bytes)
   492  		if bytes != 0 {
   493  			availableWork.Add(-int64(bytes))
   494  			totalWorked.Add(t)
   495  		}
   496  		return bytes, t
   497  	}
   498  	s.ShouldStop = func() bool {
   499  		if availableWork.Load() <= stopAt.Load() {
   500  			return true
   501  		}
   502  		return false
   503  	}
   504  	s.GoMaxProcs = func() int32 {
   505  		return 1
   506  	}
   507  
   508  	// Define a helper for verifying that various properties hold.
   509  	verifyScavengerState := func(t *testing.T, expWork uint64) {
   510  		t.Helper()
   511  
   512  		// Check to make sure it did the amount of work we expected.
   513  		if workDone := uint64(s.Released()); workDone != expWork {
   514  			t.Errorf("want %d bytes of work done, got %d", expWork, workDone)
   515  		}
   516  		// Check to make sure the scavenger is meeting its CPU target.
   517  		idealFraction := float64(ScavengePercent) / 100.0
   518  		cpuFraction := float64(totalWorked.Load()) / float64(totalWorked.Load()+totalSlept.Load())
   519  		if cpuFraction < idealFraction-0.005 || cpuFraction > idealFraction+0.005 {
   520  			t.Errorf("want %f CPU fraction, got %f", idealFraction, cpuFraction)
   521  		}
   522  	}
   523  
   524  	// Start the scavenger.
   525  	s.Start()
   526  
   527  	// Set up some work and let the scavenger run to completion.
   528  	availableWork.Store(totalWork)
   529  	s.Wake()
   530  	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
   531  		t.Fatal("timed out waiting for scavenger to run to completion")
   532  	}
   533  	// Run a check.
   534  	verifyScavengerState(t, totalWork)
   535  
   536  	// Now let's do it again and see what happens when we have no work to do.
   537  	// It should've gone right back to sleep.
   538  	s.Wake()
   539  	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
   540  		t.Fatal("timed out waiting for scavenger to run to completion")
   541  	}
   542  	// Run another check.
   543  	verifyScavengerState(t, totalWork)
   544  
   545  	// One more time, this time doing the same amount of work as the first time.
   546  	// Let's see if we can get the scavenger to continue.
   547  	availableWork.Store(totalWork)
   548  	s.Wake()
   549  	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
   550  		t.Fatal("timed out waiting for scavenger to run to completion")
   551  	}
   552  	// Run another check.
   553  	verifyScavengerState(t, 2*totalWork)
   554  
   555  	// This time, let's stop after a certain amount of work.
   556  	//
   557  	// Pick a stopping point such that when subtracted from totalWork
   558  	// we get a multiple of a relatively large power of 2. verifyScavengerState
   559  	// always makes an exact check, but the scavenger might go a little over,
   560  	// which is OK. If this breaks often or gets annoying to maintain, modify
   561  	// verifyScavengerState.
   562  	availableWork.Store(totalWork)
   563  	stoppingPoint := uint64(1<<20 - 3*PhysPageSize)
   564  	stopAt.Store(stoppingPoint)
   565  	s.Wake()
   566  	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
   567  		t.Fatal("timed out waiting for scavenger to run to completion")
   568  	}
   569  	// Run another check.
   570  	verifyScavengerState(t, 2*totalWork+(totalWork-stoppingPoint))
   571  
   572  	// Clean up.
   573  	s.Stop()
   574  }
   575  
   576  func TestScavengeIndex(t *testing.T) {
   577  	// This test suite tests the scavengeIndex data structure.
   578  
   579  	// markFunc is a function that makes the address range [base, limit)
   580  	// available for scavenging in a test index.
   581  	type markFunc func(base, limit uintptr)
   582  
   583  	// findFunc is a function that searches for the next available page
   584  	// to scavenge in the index. It asserts that the page is found in
   585  	// chunk "ci" at page "offset."
   586  	type findFunc func(ci ChunkIdx, offset uint)
   587  
   588  	// The structure of the tests below is as follows:
   589  	//
   590  	// setup creates a fake scavengeIndex that can be mutated and queried by
   591  	// the functions it returns. Those functions capture the testing.T that
   592  	// setup is called with, so they're bound to the subtest they're created in.
   593  	//
   594  	// Tests are then organized into test cases which mark some pages as
   595  	// scavenge-able then try to find them. Tests expect that the initial
   596  	// state of the scavengeIndex has all of the chunks as dense in the last
   597  	// generation and empty to the scavenger.
   598  	//
   599  	// There are a few additional tests that interleave mark and find operations,
   600  	// so they're defined separately, but use the same infrastructure.
   601  	setup := func(t *testing.T, force bool) (mark markFunc, find findFunc, nextGen func()) {
   602  		t.Helper()
   603  
   604  		// Pick some reasonable bounds. We don't need a huge range just to test.
   605  		si := NewScavengeIndex(BaseChunkIdx, BaseChunkIdx+64)
   606  
   607  		// Initialize all the chunks as dense and empty.
   608  		//
   609  		// Also, reset search addresses so that we can get page offsets.
   610  		si.AllocRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+64, 0))
   611  		si.NextGen()
   612  		si.FreeRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+64, 0))
   613  		for ci := BaseChunkIdx; ci < BaseChunkIdx+64; ci++ {
   614  			si.SetEmpty(ci)
   615  		}
   616  		si.ResetSearchAddrs()
   617  
   618  		// Create and return test functions.
   619  		mark = func(base, limit uintptr) {
   620  			t.Helper()
   621  
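        			// Alloc and then immediately free the range so its pages are
        			// counted as freshly freed and become candidates for scavenging.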
   622  			si.AllocRange(base, limit)
   623  			si.FreeRange(base, limit)
   624  		}
   625  		find = func(want ChunkIdx, wantOffset uint) {
   626  			t.Helper()
   627  
   628  			got, gotOffset := si.Find(force)
   629  			if want != got {
   630  				t.Errorf("find: wanted chunk index %d, got %d", want, got)
   631  			}
   632  			if wantOffset != gotOffset {
   633  				t.Errorf("find: wanted page offset %d, got %d", wantOffset, gotOffset)
   634  			}
   635  			if t.Failed() {
   636  				t.FailNow()
   637  			}
   638  			si.SetEmpty(got)
   639  		}
   640  		nextGen = func() {
   641  			t.Helper()
   642  
   643  			si.NextGen()
   644  		}
   645  		return
   646  	}
   647  
   648  	// Each of these test cases calls mark and then find once.
   649  	type testCase struct {
   650  		name string
   651  		mark func(markFunc)
   652  		find func(findFunc)
   653  	}
   654  	tests := []testCase{
   655  		{
   656  			name: "Uninitialized",
   657  			mark: func(_ markFunc) {},
   658  			find: func(_ findFunc) {},
   659  		},
   660  		{
   661  			name: "OnePage",
   662  			mark: func(mark markFunc) {
   663  				mark(PageBase(BaseChunkIdx, 3), PageBase(BaseChunkIdx, 4))
   664  			},
   665  			find: func(find findFunc) {
   666  				find(BaseChunkIdx, 3)
   667  			},
   668  		},
   669  		{
   670  			name: "FirstPage",
   671  			mark: func(mark markFunc) {
   672  				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx, 1))
   673  			},
   674  			find: func(find findFunc) {
   675  				find(BaseChunkIdx, 0)
   676  			},
   677  		},
   678  		{
   679  			name: "SeveralPages",
   680  			mark: func(mark markFunc) {
   681  				mark(PageBase(BaseChunkIdx, 9), PageBase(BaseChunkIdx, 14))
   682  			},
   683  			find: func(find findFunc) {
   684  				find(BaseChunkIdx, 13)
   685  			},
   686  		},
   687  		{
   688  			name: "WholeChunk",
   689  			mark: func(mark markFunc) {
   690  				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
   691  			},
   692  			find: func(find findFunc) {
   693  				find(BaseChunkIdx, PallocChunkPages-1)
   694  			},
   695  		},
   696  		{
   697  			name: "LastPage",
   698  			mark: func(mark markFunc) {
   699  				mark(PageBase(BaseChunkIdx, PallocChunkPages-1), PageBase(BaseChunkIdx+1, 0))
   700  			},
   701  			find: func(find findFunc) {
   702  				find(BaseChunkIdx, PallocChunkPages-1)
   703  			},
   704  		},
   705  		{
   706  			name: "SevenChunksOffset",
   707  			mark: func(mark markFunc) {
   708  				mark(PageBase(BaseChunkIdx+6, 11), PageBase(BaseChunkIdx+13, 15))
   709  			},
   710  			find: func(find findFunc) {
   711  				find(BaseChunkIdx+13, 14)
   712  				for i := BaseChunkIdx + 12; i >= BaseChunkIdx+6; i-- {
   713  					find(i, PallocChunkPages-1)
   714  				}
   715  			},
   716  		},
   717  		{
   718  			name: "ThirtyTwoChunks",
   719  			mark: func(mark markFunc) {
   720  				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
   721  			},
   722  			find: func(find findFunc) {
   723  				for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
   724  					find(i, PallocChunkPages-1)
   725  				}
   726  			},
   727  		},
   728  		{
   729  			name: "ThirtyTwoChunksOffset",
   730  			mark: func(mark markFunc) {
   731  				mark(PageBase(BaseChunkIdx+3, 0), PageBase(BaseChunkIdx+35, 0))
   732  			},
   733  			find: func(find findFunc) {
   734  				for i := BaseChunkIdx + 34; i >= BaseChunkIdx+3; i-- {
   735  					find(i, PallocChunkPages-1)
   736  				}
   737  			},
   738  		},
   739  		{
   740  			name: "Mark",
   741  			mark: func(mark markFunc) {
   742  				for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
   743  					mark(PageBase(i, 0), PageBase(i+1, 0))
   744  				}
   745  			},
   746  			find: func(find findFunc) {
   747  				for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
   748  					find(i, PallocChunkPages-1)
   749  				}
   750  			},
   751  		},
   752  		{
   753  			name: "MarkIdempotentOneChunk",
   754  			mark: func(mark markFunc) {
   755  				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
   756  				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
   757  			},
   758  			find: func(find findFunc) {
   759  				find(BaseChunkIdx, PallocChunkPages-1)
   760  			},
   761  		},
   762  		{
   763  			name: "MarkIdempotentThirtyTwoChunks",
   764  			mark: func(mark markFunc) {
   765  				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
   766  				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
   767  			},
   768  			find: func(find findFunc) {
   769  				for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
   770  					find(i, PallocChunkPages-1)
   771  				}
   772  			},
   773  		},
   774  		{
   775  			name: "MarkIdempotentThirtyTwoChunksOffset",
   776  			mark: func(mark markFunc) {
   777  				mark(PageBase(BaseChunkIdx+4, 0), PageBase(BaseChunkIdx+31, 0))
   778  				mark(PageBase(BaseChunkIdx+5, 0), PageBase(BaseChunkIdx+36, 0))
   779  			},
   780  			find: func(find findFunc) {
   781  				for i := BaseChunkIdx + 35; i >= BaseChunkIdx+4; i-- {
   782  					find(i, PallocChunkPages-1)
   783  				}
   784  			},
   785  		},
   786  	}
   787  	if PallocChunkPages >= 512 {
   788  		tests = append(tests,
   789  			testCase{
   790  				name: "TwoChunks",
   791  				mark: func(mark markFunc) {
   792  					mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128))
   793  				},
   794  				find: func(find findFunc) {
   795  					find(BaseChunkIdx+1, 127)
   796  					find(BaseChunkIdx, PallocChunkPages-1)
   797  				},
   798  			},
   799  			testCase{
   800  				name: "TwoChunksOffset",
   801  				mark: func(mark markFunc) {
   802  					mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129))
   803  				},
   804  				find: func(find findFunc) {
   805  					find(BaseChunkIdx+8, 128)
   806  					find(BaseChunkIdx+7, PallocChunkPages-1)
   807  				},
   808  			},
   809  		)
   810  	}
   811  	for _, test := range tests {
   812  		t.Run("Bg/"+test.name, func(t *testing.T) {
   813  			mark, find, nextGen := setup(t, false)
   814  			test.mark(mark)
   815  			find(0, 0)      // Make sure we find nothing at this point.
   816  			nextGen()       // Move to the next generation.
   817  			test.find(find) // Now we should be able to find things.
   818  			find(0, 0)      // The test should always fully exhaust the index.
   819  		})
   820  		t.Run("Force/"+test.name, func(t *testing.T) {
   821  			mark, find, _ := setup(t, true)
   822  			test.mark(mark)
   823  			test.find(find) // Finding should always work when forced.
   824  			find(0, 0)      // The test should always fully exhaust the index.
   825  		})
   826  	}
   827  	t.Run("Bg/MarkInterleaved", func(t *testing.T) {
   828  		mark, find, nextGen := setup(t, false)
   829  		for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
   830  			mark(PageBase(i, 0), PageBase(i+1, 0))
   831  			nextGen()
   832  			find(i, PallocChunkPages-1)
   833  		}
   834  		find(0, 0)
   835  	})
   836  	t.Run("Force/MarkInterleaved", func(t *testing.T) {
   837  		mark, find, _ := setup(t, true)
   838  		for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
   839  			mark(PageBase(i, 0), PageBase(i+1, 0))
   840  			find(i, PallocChunkPages-1)
   841  		}
   842  		find(0, 0)
   843  	})
   844  }
   845  
   846  func TestScavChunkDataPack(t *testing.T) {
   847  	if PallocChunkPages >= 512 {
   848  		if !CheckPackScavChunkData(1918237402, 512, 512, 0b11) {
   849  			t.Error("failed pack/unpack check for scavChunkData 1")
   850  		}
   851  	}
   852  	if !CheckPackScavChunkData(^uint32(0), 12, 0, 0b00) {
   853  		t.Error("failed pack/unpack check for scavChunkData 2")
   854  	}
   855  }
   856  
   857  func FuzzPIController(f *testing.F) {
   858  	isNormal := func(x float64) bool {
   859  		return !math.IsInf(x, 0) && !math.IsNaN(x)
   860  	}
   861  	isPositive := func(x float64) bool {
   862  		return isNormal(x) && x > 0
   863  	}
   864  	// Seed with constants from controllers in the runtime.
   865  	// It's not critical that we keep these in sync, they're just
   866  	// reasonable seed inputs.
   867  	f.Add(0.3375, 3.2e6, 1e9, 0.001, 1000.0, 0.01)
   868  	f.Add(0.9, 4.0, 1000.0, -1000.0, 1000.0, 0.84)
   869  	f.Fuzz(func(t *testing.T, kp, ti, tt, min, max, setPoint float64) {
   870  		// Ignore uninteresting invalid parameters. These parameters
   871  		// are constant, so in practice surprising values will be documented
   872  		// or will be otherwise immediately visible.
   873  		//
   874  		// We just want to make sure that given a non-Inf, non-NaN input,
   875  		// we always get a non-Inf, non-NaN output.
   876  		if !isPositive(kp) || !isPositive(ti) || !isPositive(tt) {
   877  			return
   878  		}
   879  		if !isNormal(min) || !isNormal(max) || min > max {
   880  			return
   881  		}
   882  		// Use a random source, but make it deterministic.
   883  		rs := rand.New(rand.NewSource(800))
   884  		randFloat64 := func() float64 {
   885  			return math.Float64frombits(rs.Uint64())
   886  		}
   887  		p := NewPIController(kp, ti, tt, min, max)
   888  		state := float64(0)
   889  		for i := 0; i < 100; i++ {
   890  			input := randFloat64()
   891  			// Ignore the "ok" parameter. We're just trying to break it.
   892  			// state is intentionally completely uncorrelated with the input.
   893  			var ok bool
   894  			state, ok = p.Next(input, setPoint, 1.0)
   895  			if !isNormal(state) {
   896  				t.Fatalf("got NaN or Inf result from controller: %f %v", state, ok)
   897  			}
   898  		}
   899  	})
   900  }
   901  
