Source file: src/runtime/mgcwork.go

package runtime

import (
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// _WorkbufSize is the size of each work buffer, in bytes.
	_WorkbufSize = 2048

	// workbufAlloc is the number of bytes to allocate at a time for
	// new workbufs. As the init check below enforces, this must be a
	// multiple of pageSize and of _WorkbufSize.
	workbufAlloc = 32 << 10
)

func init() {
	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
		throw("bad workbufAlloc")
	}
}
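
// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// A minimal usage sketch (assuming preemption is disabled so the
// cached buffers cannot be stranded mid-operation):
//
//	gcw := &getg().m.p.ptr().gcw
//	gcw.put(obj)      // produce: enqueue a pointer to trace
//	p := gcw.tryGet() // consume: dequeue a pointer, or 0 if none
//
// It is important that any use of gcWork during the mark phase
// prevents the GC from transitioning to mark termination while work is
// still cached here, since that work is only published to the global
// lists by dispose or when a buffer fills up.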
type gcWork struct {
	// wbuf1 and wbuf2 are the primary and secondary work buffers.
	// wbuf1 is always the buffer being pushed to and popped from;
	// when it fills up or runs dry it is swapped with wbuf2, giving
	// one buffer's worth of hysteresis before work has to be flushed
	// to or fetched from the global lists.
	//
	// Invariant: both wbuf1 and wbuf2 are nil, or neither is.
	wbuf1, wbuf2 *workbuf

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64

	// Heap scan work performed on this gcWork. This is aggregated
	// into gcController by dispose.
	heapScanWork int64

	// flushedWork indicates that a non-empty work buffer was
	// flushed to the global work list since this flag was last
	// cleared.
	flushedWork bool
}
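
// init initializes w by grabbing an empty buffer for wbuf1 and, if
// there is global work available, a full buffer for wbuf2 (otherwise a
// second empty buffer).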
func (w *gcWork) init() {
	w.wbuf1 = getempty()
	wbuf2 := trygetfull()
	if wbuf2 == nil {
		wbuf2 = getempty()
	}
	w.wbuf2 = wbuf2
}
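
// put enqueues a pointer for the garbage collector to trace.
// If the current buffer fills up, a full buffer is published to the
// global work list and the GC controller is notified so it can start
// more workers.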
func (w *gcWork) put(obj uintptr) {
	flushed := false
	wbuf := w.wbuf1
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	} else if wbuf.nobj == len(wbuf.obj) {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			wbuf = getempty()
			w.wbuf1 = wbuf
			flushed = true
		}
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

	// If we put a buffer on the full list, let the GC controller
	// know so it can encourage more workers to run.
	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
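
// putFast is the buffered fast path of put: it reports whether the
// pointer could be enqueued without touching the global lists.
// If it returns false, the caller must fall back to put.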
func (w *gcWork) putFast(obj uintptr) bool {
	wbuf := w.wbuf1
	if wbuf == nil || wbuf.nobj == len(wbuf.obj) {
		return false
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++
	return true
}
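
// putBatch performs a put on every pointer in obj, spilling full
// buffers to the global work list as needed.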
func (w *gcWork) putBatch(obj []uintptr) {
	if len(obj) == 0 {
		return
	}

	flushed := false
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
	}

	for len(obj) > 0 {
		for wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
			wbuf = w.wbuf1
			flushed = true
		}
		n := copy(wbuf.obj[wbuf.nobj:], obj)
		wbuf.nobj += n
		obj = obj[n:]
	}

	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
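
// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0. Note that there may still be pointers
// cached in other gcWork instances that have not been flushed yet.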
func (w *gcWork) tryGet() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == 0 {
			owbuf := wbuf
			wbuf = trygetfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbuf
		}
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
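
// tryGetFast is the buffered fast path of tryGet: it dequeues a
// pointer only if one is immediately available in wbuf1 and otherwise
// returns 0, leaving the slow path to tryGet.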
func (w *gcWork) tryGetFast() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil || wbuf.nobj == 0 {
		return 0
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
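
// dispose returns any cached pointers to the global queues and flushes
// this gcWork's accumulated statistics (bytesMarked and heapScanWork)
// into the global totals.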
func (w *gcWork) dispose() {
	if wbuf := w.wbuf1; wbuf != nil {
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf1 = nil

		wbuf = w.wbuf2
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf2 = nil
	}
	if w.bytesMarked != 0 {
		// Flush the locally accumulated mark statistics to the
		// global totals.
		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.heapScanWork != 0 {
		gcController.heapScanWork.Add(w.heapScanWork)
		w.heapScanWork = 0
	}
}
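
// balance moves some work that's cached in this gcWork back on the
// global queue so that other workers can pick it up.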
func (w *gcWork) balance() {
	if w.wbuf1 == nil {
		return
	}
	if wbuf := w.wbuf2; wbuf.nobj != 0 {
		putfull(wbuf)
		w.flushedWork = true
		w.wbuf2 = getempty()
	} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
		w.wbuf1 = handoff(wbuf)
		w.flushedWork = true // handoff did putfull
	} else {
		return
	}
	// We flushed a buffer to the full list, so wake another worker.
	if gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
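
// empty reports whether w has no mark work available.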
func (w *gcWork) empty() bool {
	return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
}
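
// Internally, the mark work is held in workbufs: fixed-size buffers of
// pointers that live on the lock-free work.empty and work.full stacks.
// Each workbuf starts with a small header followed by an array of
// object pointers that fills the rest of its _WorkbufSize bytes.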
type workbufhdr struct {
	node lfnode // must be first
	nobj int
}

type workbuf struct {
	_ sys.NotInHeap
	workbufhdr
	// account for the above fields
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
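
// checknonempty and checkempty are sanity checks on a workbuf's object
// count; each throws if the buffer is not in the state its name
// implies.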
func (b *workbuf) checknonempty() {
	if b.nobj == 0 {
		throw("workbuf is empty")
	}
}

func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		throw("workbuf is not empty")
	}
}
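
// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers (a whole span's worth at a time) if none are
// available.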
func getempty() *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(work.empty.pop())
		if b != nil {
			b.checkempty()
		}
	}
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if b == nil {
		// Allocate more workbufs.
		var s *mspan
		if work.wbufSpans.free.first != nil {
			lock(&work.wbufSpans.lock)
			s = work.wbufSpans.free.first
			if s != nil {
				work.wbufSpans.free.remove(s)
				work.wbufSpans.busy.insert(s)
			}
			unlock(&work.wbufSpans.lock)
		}
		if s == nil {
			systemstack(func() {
				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
			})
			if s == nil {
				throw("out of memory")
			}
			// Record the new span.
			lock(&work.wbufSpans.lock)
			work.wbufSpans.busy.insert(s)
			unlock(&work.wbufSpans.lock)
		}
		// Slice up the span into new workbufs. Return one and put
		// the rest on the empty list.
		for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
			newb := (*workbuf)(unsafe.Pointer(s.base() + i))
			newb.nobj = 0
			lfnodeValidate(&newb.node)
			if i == 0 {
				b = newb
			} else {
				putempty(newb)
			}
		}
	}
	return b
}
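
// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b; the push relinquishes ownership.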
func putempty(b *workbuf) {
	b.checkempty()
	work.empty.push(&b.node)
}
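
// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with mutators for ownership of partially full buffers.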
func putfull(b *workbuf) {
	b.checknonempty()
	work.full.push(&b.node)
}
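
// trygetfull tries to get a full or partially full workbuf.
// If one is not immediately available, it returns nil.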
func trygetfull() *workbuf {
	b := (*workbuf)(work.full.pop())
	if b != nil {
		b.checknonempty()
	}
	return b
}
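
// handoff splits b: it moves half of b's pointers into a fresh
// workbuf, publishes b (with the remaining half) on the full list so
// another worker can steal it, and returns the new buffer to the
// caller.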
func handoff(b *workbuf) *workbuf {
	// Make new buffer with half of b's pointers.
	b1 := getempty()
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))

	// Put b on the full list - let the first half of b get stolen.
	putfull(b)
	return b1
}
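
// prepareFreeWorkbufs moves busy workbuf spans to the free list so
// they can be freed back to the heap. This must only be called when
// all workbufs are on the empty list.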
func prepareFreeWorkbufs() {
	lock(&work.wbufSpans.lock)
	if work.full != 0 {
		throw("cannot free workbufs when work.full != 0")
	}
	// Since all workbufs are on the empty list, we don't care which
	// ones are in which spans. We can wipe the entire empty list and
	// move all workbuf spans to the free list.
	work.empty = 0
	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
	unlock(&work.wbufSpans.lock)
}
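
// freeSomeWbufs frees some workbuf spans back to the heap and reports
// whether it should be called again to free more.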
func freeSomeWbufs(preemptible bool) bool {
	const batchSize = 64
	lock(&work.wbufSpans.lock)
	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
		unlock(&work.wbufSpans.lock)
		return false
	}
	systemstack(func() {
		gp := getg().m.curg
		for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
			span := work.wbufSpans.free.first
			if span == nil {
				break
			}
			work.wbufSpans.free.remove(span)
			mheap_.freeManual(span, spanAllocWorkBuf)
		}
	})
	more := !work.wbufSpans.free.isEmpty()
	unlock(&work.wbufSpans.lock)
	return more
}