Source file
src/runtime/malloc_test.go
package runtime_test

import (
	"flag"
	"fmt"
	"internal/asan"
	"internal/race"
	"internal/testenv"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	. "runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

var testMemStatsCount int

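// TestMemStats checks that the statistics reported by ReadMemStats are
// non-zero where expected, within sane bounds, and internally consistent
// (for example, Sys should equal the sum of the per-subsystem Sys fields).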
func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there has been at least one GC.
	GC()

	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x any) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// Stats accumulate across runs of this test in the same
			// process (e.g. with -test.count), so the upper-bound
			// checks are only meaningful on the first run.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}

	// HeapReleased, HeapIdle, PauseTotalNs, and the pause arrays may
	// legitimately be zero, so they get no non-zero check.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// The pause ring buffer hasn't wrapped, so the sum should be exact
		// and the unused tail of the buffer should still be zero.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

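// TestStringConcatenationAllocs verifies that concatenating a constant
// string with a converted []byte allocates exactly once.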
func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// The only allocation should be the concatenated result string.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

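// TestTinyAlloc checks that the tiny allocator packs small allocations
// together: at least two of the 1-byte allocations below must land in the
// same 8-byte-aligned chunk.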
func TestTinyAlloc(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	// Count the distinct 8-byte-aligned chunks the pointers fall into.
	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	// If every allocation landed in its own chunk, nothing was packed
	// together and the tiny allocator did not do its job.
	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

// obj12 is a small struct whose first field is 64 bits wide; it is used by
// TestTinyAllocIssue37262 to provoke a potentially misaligned 64-bit atomic
// access on an object handled by the tiny allocator.
type obj12 struct {
	a uint64
	b uint32
}

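// TestTinyAllocIssue37262 tries to provoke a misaligned 64-bit atomic access
// on an object placed by the tiny allocator (see golang.org/issue/37262): it
// partially fills a fresh tiny block with a 4-byte object, allocates an obj12
// after it, and then performs an atomic store to the obj12's uint64 field,
// which faults on platforms that require 8-byte alignment if the allocator
// misplaced the object.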
func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}

	// GC twice to get the heap into a reasonably stable state so that the
	// allocations below behave predictably.
	runtime.GC()
	runtime.GC()

	// Disable preemption so that all the allocations below go through the
	// same P's tiny allocator.
	runtime.Acquirem()

	// Make 1-byte allocations until one lands on the last byte of a
	// 16-byte tiny block, so the next tiny allocation starts a fresh block.
	aligned := false
	for i := 0; i < 16; i++ {
		x := runtime.Escape(new(byte))
		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Occupy the first 4 bytes of the fresh tiny block, so a naive
	// allocator would place the next object at an offset that is not
	// 8-byte aligned.
	runtime.Escape(new(uint32))

	// Allocate the obj12. If the tiny allocator does not realign it, its
	// uint64 field ends up misaligned.
	tinyObj12 := runtime.Escape(new(obj12))

	// This store must not fault on platforms that require aligned
	// 64-bit atomics.
	atomic.StoreUint64(&tinyObj12.a, 10)

	runtime.Releasem()
}

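// TestPageCacheLeak checks that no pages have been stranded in per-P page
// caches.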
func TestPageCacheLeak(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

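// TestPhysicalMemoryUtilization runs the GCPhys testprog in a subprocess and
// checks that it reports success.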
func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

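// TestScavengedBitsCleared checks that no in-use heap memory is still marked
// as scavenged, logging the mismatched bitmap words if any are found.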
func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t| got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

// acLink is a 1 MiB object used to consume heap address space quickly.
type acLink struct {
	x [1 << 20]byte
}

var arenaCollisionSink []*acLink

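// TestArenaCollision checks that the heap can keep growing when the arena
// hint addresses it wants to use collide with existing memory mappings.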
func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// The test deliberately reserves memory at the runtime's arena hints,
	// so it runs in a subprocess to avoid disturbing the rest of the
	// test binary.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestArenaCollision$", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint addresses, so it
			// starts mapping the heap wherever it can. The race detector
			// doesn't support that, so expect the failure below instead
			// of PASS.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}

	// Drop all but the next few arena hints so the test doesn't have to
	// churn through the full hint list.
	KeepNArenaHints(3)

	// Consume those hints and force the runtime to fall back to new ones.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so the heap can't use it.
		start, end, ok := MapNextArenaHint()
		if !ok {
			t.Skipf("failed to reserve memory at next arena hint [%#x, %#x)", start, end)
		}
		t.Logf("reserved [%#x, %#x)", start, end)
		disallowed = append(disallowed, [2]uintptr{start, end})

		// Allocate until the runtime moves on to a new hint.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)

			// None of these allocations may land in a reserved region.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}

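// The following benchmarks measure the cost of allocating small objects of
// various shapes. Escape forces each allocation to the heap so the compiler
// cannot optimize it away.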
func BenchmarkMalloc8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(int64)
		Escape(p)
	}
}

func BenchmarkMalloc16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

type LargeStruct struct {
	x [16][]byte
}

func BenchmarkMallocLargeStruct(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		Escape(p)
	}
}

var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

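// benchHelper measures the cost of a GC cycle while n goroutines are blocked
// reading from channels: each iteration wakes every goroutine, lets the
// scheduler settle, and then times a single collection.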
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		// Wake every goroutine, give things a moment to settle, then
		// time a single GC.
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

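// BenchmarkGoroutineIdle measures GC cost with many completely idle
// goroutines parked on a single channel.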
func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}