Source file src/runtime/mgcpacer.go
1 // Copyright 2021 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "internal/cpu" 9 "internal/goexperiment" 10 "internal/runtime/atomic" 11 "internal/runtime/math" 12 "internal/strconv" 13 _ "unsafe" 14 ) 15 16 const ( 17 // gcGoalUtilization is the goal CPU utilization for 18 // marking as a fraction of GOMAXPROCS. 19 // 20 // Increasing the goal utilization will shorten GC cycles as the GC 21 // has more resources behind it, lessening costs from the write barrier, 22 // but comes at the cost of increasing mutator latency. 23 gcGoalUtilization = gcBackgroundUtilization 24 25 // gcBackgroundUtilization is the fixed CPU utilization for background 26 // marking. It must be <= gcGoalUtilization. The difference between 27 // gcGoalUtilization and gcBackgroundUtilization will be made up by 28 // mark assists. The scheduler will aim to use within 50% of this 29 // goal. 30 // 31 // As a general rule, there's little reason to set gcBackgroundUtilization 32 // < gcGoalUtilization. One reason might be in mostly idle applications, 33 // where goroutines are unlikely to assist at all, so the actual 34 // utilization will be lower than the goal. But this is moot point 35 // because the idle mark workers already soak up idle CPU resources. 36 // These two values are still kept separate however because they are 37 // distinct conceptually, and in previous iterations of the pacer the 38 // distinction was more important. 39 gcBackgroundUtilization = 0.25 40 41 // gcCreditSlack is the amount of scan work credit that can 42 // accumulate locally before updating gcController.heapScanWork and, 43 // optionally, gcController.bgScanCredit. Lower values give a more 44 // accurate assist ratio and make it more likely that assists will 45 // successfully steal background credit. Higher values reduce memory 46 // contention. 47 gcCreditSlack = 2000 48 49 // gcAssistTimeSlack is the nanoseconds of mutator assist time that 50 // can accumulate on a P before updating gcController.assistTime. 51 gcAssistTimeSlack = 5000 52 53 // gcOverAssistWork determines how many extra units of scan work a GC 54 // assist does when an assist happens. This amortizes the cost of an 55 // assist by pre-paying for this many bytes of future allocations. 56 gcOverAssistWork = 64 << 10 57 58 // defaultHeapMinimum is the value of heapMinimum for GOGC==100. 59 defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) + 60 (1-goexperiment.HeapMinimum512KiBInt)*(4<<20) 61 62 // maxStackScanSlack is the bytes of stack space allocated or freed 63 // that can accumulate on a P before updating gcController.stackSize. 64 maxStackScanSlack = 8 << 10 65 66 // memoryLimitMinHeapGoalHeadroom is the minimum amount of headroom the 67 // pacer gives to the heap goal when operating in the memory-limited regime. 68 // That is, it'll reduce the heap goal by this many extra bytes off of the 69 // base calculation, at minimum. 70 memoryLimitMinHeapGoalHeadroom = 1 << 20 71 72 // memoryLimitHeapGoalHeadroomPercent is how headroom the memory-limit-based 73 // heap goal should have as a percent of the maximum possible heap goal allowed 74 // to maintain the memory limit. 75 memoryLimitHeapGoalHeadroomPercent = 3 76 ) 77 78 // gcController implements the GC pacing controller that determines 79 // when to trigger concurrent garbage collection and how much marking 80 // work to do in mutator assists and background marking. 81 // 82 // It calculates the ratio between the allocation rate (in terms of CPU 83 // time) and the GC scan throughput to determine the heap size at which to 84 // trigger a GC cycle such that no GC assists are required to finish on time. 85 // This algorithm thus optimizes GC CPU utilization to the dedicated background 86 // mark utilization of 25% of GOMAXPROCS by minimizing GC assists. 87 // GOMAXPROCS. The high-level design of this algorithm is documented 88 // at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md. 89 // See https://golang.org/s/go15gcpacing for additional historical context. 90 var gcController gcControllerState 91 92 type gcControllerState struct { 93 // Initialized from GOGC. GOGC=off means no GC. 94 gcPercent atomic.Int32 95 96 // memoryLimit is the soft memory limit in bytes. 97 // 98 // Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64 99 // which means no soft memory limit in practice. 100 // 101 // This is an int64 instead of a uint64 to more easily maintain parity with 102 // the SetMemoryLimit API, which sets a maximum at MaxInt64. This value 103 // should never be negative. 104 memoryLimit atomic.Int64 105 106 // heapMinimum is the minimum heap size at which to trigger GC. 107 // For small heaps, this overrides the usual GOGC*live set rule. 108 // 109 // When there is a very small live set but a lot of allocation, simply 110 // collecting when the heap reaches GOGC*live results in many GC 111 // cycles and high total per-GC overhead. This minimum amortizes this 112 // per-GC overhead while keeping the heap reasonably small. 113 // 114 // During initialization this is set to 4MB*GOGC/100. In the case of 115 // GOGC==0, this will set heapMinimum to 0, resulting in constant 116 // collection even when the heap size is small, which is useful for 117 // debugging. 118 heapMinimum uint64 119 120 // runway is the amount of runway in heap bytes allocated by the 121 // application that we want to give the GC once it starts. 122 // 123 // This is computed from consMark during mark termination. 124 runway atomic.Uint64 125 126 // consMark is the estimated per-CPU consMark ratio for the application. 127 // 128 // It represents the ratio between the application's allocation 129 // rate, as bytes allocated per CPU-time, and the GC's scan rate, 130 // as bytes scanned per CPU-time. 131 // The units of this ratio are (B / cpu-ns) / (B / cpu-ns). 132 // 133 // At a high level, this value is computed as the bytes of memory 134 // allocated (cons) per unit of scan work completed (mark) in a GC 135 // cycle, divided by the CPU time spent on each activity. 136 // 137 // Updated at the end of each GC cycle, in endCycle. 138 consMark float64 139 140 // lastConsMark is the computed cons/mark value for the previous 4 GC 141 // cycles. Note that this is *not* the last value of consMark, but the 142 // measured cons/mark value in endCycle. 143 lastConsMark [4]float64 144 145 // gcPercentHeapGoal is the goal heapLive for when next GC ends derived 146 // from gcPercent. 147 // 148 // Set to ^uint64(0) if gcPercent is disabled. 149 gcPercentHeapGoal atomic.Uint64 150 151 // sweepDistMinTrigger is the minimum trigger to ensure a minimum 152 // sweep distance. 153 // 154 // This bound is also special because it applies to both the trigger 155 // *and* the goal (all other trigger bounds must be based *on* the goal). 156 // 157 // It is computed ahead of time, at commit time. The theory is that, 158 // absent a sudden change to a parameter like gcPercent, the trigger 159 // will be chosen to always give the sweeper enough headroom. However, 160 // such a change might dramatically and suddenly move up the trigger, 161 // in which case we need to ensure the sweeper still has enough headroom. 162 sweepDistMinTrigger atomic.Uint64 163 164 // triggered is the point at which the current GC cycle actually triggered. 165 // Only valid during the mark phase of a GC cycle, otherwise set to ^uint64(0). 166 // 167 // Updated while the world is stopped. 168 triggered uint64 169 170 // lastHeapGoal is the value of heapGoal at the moment the last GC 171 // ended. Note that this is distinct from the last value heapGoal had, 172 // because it could change if e.g. gcPercent changes. 173 // 174 // Read and written with the world stopped or with mheap_.lock held. 175 lastHeapGoal uint64 176 177 // heapLive is the number of bytes considered live by the GC. 178 // That is: retained by the most recent GC plus allocated 179 // since then. heapLive ≤ memstats.totalAlloc-memstats.totalFree, since 180 // heapAlloc includes unmarked objects that have not yet been swept (and 181 // hence goes up as we allocate and down as we sweep) while heapLive 182 // excludes these objects (and hence only goes up between GCs). 183 // 184 // To reduce contention, this is updated only when obtaining a span 185 // from an mcentral and at this point it counts all of the unallocated 186 // slots in that span (which will be allocated before that mcache 187 // obtains another span from that mcentral). Hence, it slightly 188 // overestimates the "true" live heap size. It's better to overestimate 189 // than to underestimate because 1) this triggers the GC earlier than 190 // necessary rather than potentially too late and 2) this leads to a 191 // conservative GC rate rather than a GC rate that is potentially too 192 // low. 193 // 194 // Whenever this is updated, call traceHeapAlloc() and 195 // this gcControllerState's revise() method. 196 heapLive atomic.Uint64 197 198 // heapScan is the number of bytes of "scannable" heap. This is the 199 // live heap (as counted by heapLive), but omitting no-scan objects and 200 // no-scan tails of objects. 201 // 202 // This value is fixed at the start of a GC cycle. It represents the 203 // maximum scannable heap. 204 heapScan atomic.Uint64 205 206 // lastHeapScan is the number of bytes of heap that were scanned 207 // last GC cycle. It is the same as heapMarked, but only 208 // includes the "scannable" parts of objects. 209 // 210 // Updated when the world is stopped. 211 lastHeapScan uint64 212 213 // lastStackScan is the number of bytes of stack that were scanned 214 // last GC cycle. 215 lastStackScan atomic.Uint64 216 217 // maxStackScan is the amount of allocated goroutine stack space in 218 // use by goroutines. 219 // 220 // This number tracks allocated goroutine stack space rather than used 221 // goroutine stack space (i.e. what is actually scanned) because used 222 // goroutine stack space is much harder to measure cheaply. By using 223 // allocated space, we make an overestimate; this is OK, it's better 224 // to conservatively overcount than undercount. 225 maxStackScan atomic.Uint64 226 227 // globalsScan is the total amount of global variable space 228 // that is scannable. 229 globalsScan atomic.Uint64 230 231 // heapMarked is the number of bytes marked by the previous 232 // GC. After mark termination, heapLive == heapMarked, but 233 // unlike heapLive, heapMarked does not change until the 234 // next mark termination. 235 heapMarked uint64 236 237 // heapScanWork is the total heap scan work performed this cycle. 238 // stackScanWork is the total stack scan work performed this cycle. 239 // globalsScanWork is the total globals scan work performed this cycle. 240 // 241 // These are updated atomically during the cycle. Updates occur in 242 // bounded batches, since they are both written and read 243 // throughout the cycle. At the end of the cycle, heapScanWork is how 244 // much of the retained heap is scannable. 245 // 246 // Currently these are measured in bytes. For most uses, this is an 247 // opaque unit of work, but for estimation the definition is important. 248 // 249 // Note that stackScanWork includes only stack space scanned, not all 250 // of the allocated stack. 251 heapScanWork atomic.Int64 252 stackScanWork atomic.Int64 253 globalsScanWork atomic.Int64 254 255 // bgScanCredit is the scan work credit accumulated by the concurrent 256 // background scan. This credit is accumulated by the background scan 257 // and stolen by mutator assists. Updates occur in bounded batches, 258 // since it is both written and read throughout the cycle. 259 bgScanCredit atomic.Int64 260 261 // assistTime is the nanoseconds spent in mutator assists 262 // during this cycle. This is updated atomically, and must also 263 // be updated atomically even during a STW, because it is read 264 // by sysmon. Updates occur in bounded batches, since it is both 265 // written and read throughout the cycle. 266 assistTime atomic.Int64 267 268 // dedicatedMarkTime is the nanoseconds spent in dedicated mark workers 269 // during this cycle. This is updated at the end of the concurrent mark 270 // phase. 271 dedicatedMarkTime atomic.Int64 272 273 // fractionalMarkTime is the nanoseconds spent in the fractional mark 274 // worker during this cycle. This is updated throughout the cycle and 275 // will be up-to-date if the fractional mark worker is not currently 276 // running. 277 fractionalMarkTime atomic.Int64 278 279 // idleMarkTime is the nanoseconds spent in idle marking during this 280 // cycle. This is updated throughout the cycle. 281 idleMarkTime atomic.Int64 282 283 // markStartTime is the absolute start time in nanoseconds 284 // that assists and background mark workers started. 285 markStartTime int64 286 287 // dedicatedMarkWorkersNeeded is the number of dedicated mark workers 288 // that need to be started. This is computed at the beginning of each 289 // cycle and decremented as dedicated mark workers get started. 290 dedicatedMarkWorkersNeeded atomic.Int64 291 292 // idleMarkWorkers is two packed int32 values in a single uint64. 293 // These two values are always updated simultaneously. 294 // 295 // The bottom int32 is the current number of idle mark workers executing. 296 // 297 // The top int32 is the maximum number of idle mark workers allowed to 298 // execute concurrently. Normally, this number is just gomaxprocs. However, 299 // during periodic GC cycles it is set to 0 because the system is idle 300 // anyway; there's no need to go full blast on all of GOMAXPROCS. 301 // 302 // The maximum number of idle mark workers is used to prevent new workers 303 // from starting, but it is not a hard maximum. It is possible (but 304 // exceedingly rare) for the current number of idle mark workers to 305 // transiently exceed the maximum. This could happen if the maximum changes 306 // just after a GC ends, and an M with no P. 307 // 308 // Note that if we have no dedicated mark workers, we set this value to 309 // 1 in this case we only have fractional GC workers which aren't scheduled 310 // strictly enough to ensure GC progress. As a result, idle-priority mark 311 // workers are vital to GC progress in these situations. 312 // 313 // For example, consider a situation in which goroutines block on the GC 314 // (such as via runtime.GOMAXPROCS) and only fractional mark workers are 315 // scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the 316 // last running M might skip scheduling a fractional mark worker if its 317 // utilization goal is met, such that once it goes to sleep (because there's 318 // nothing to do), there will be nothing else to spin up a new M for the 319 // fractional worker in the future, stalling GC progress and causing a 320 // deadlock. However, idle-priority workers will *always* run when there is 321 // nothing left to do, ensuring the GC makes progress. 322 // 323 // See github.com/golang/go/issues/44163 for more details. 324 idleMarkWorkers atomic.Uint64 325 326 // assistWorkPerByte is the ratio of scan work to allocated 327 // bytes that should be performed by mutator assists. This is 328 // computed at the beginning of each cycle and updated every 329 // time heapScan is updated. 330 assistWorkPerByte atomic.Float64 331 332 // assistBytesPerWork is 1/assistWorkPerByte. 333 // 334 // Note that because this is read and written independently 335 // from assistWorkPerByte users may notice a skew between 336 // the two values, and such a state should be safe. 337 assistBytesPerWork atomic.Float64 338 339 // fractionalUtilizationGoal is the fraction of wall clock 340 // time that should be spent in the fractional mark worker on 341 // each P that isn't running a dedicated worker. 342 // 343 // For example, if the utilization goal is 25% and there are 344 // no dedicated workers, this will be 0.25. If the goal is 345 // 25%, there is one dedicated worker, and GOMAXPROCS is 5, 346 // this will be 0.05 to make up the missing 5%. 347 // 348 // If this is zero, no fractional workers are needed. 349 fractionalUtilizationGoal float64 350 351 // These memory stats are effectively duplicates of fields from 352 // memstats.heapStats but are updated atomically or with the world 353 // stopped and don't provide the same consistency guarantees. 354 // 355 // Because the runtime is responsible for managing a memory limit, it's 356 // useful to couple these stats more tightly to the gcController, which 357 // is intimately connected to how that memory limit is maintained. 358 heapInUse sysMemStat // bytes in mSpanInUse spans 359 heapReleased sysMemStat // bytes released to the OS 360 heapFree sysMemStat // bytes not in any span, but not released to the OS 361 totalAlloc atomic.Uint64 // total bytes allocated 362 totalFree atomic.Uint64 // total bytes freed 363 mappedReady atomic.Uint64 // total virtual memory in the Ready state (see mem.go). 364 365 // test indicates that this is a test-only copy of gcControllerState. 366 test bool 367 368 _ cpu.CacheLinePad 369 } 370 371 func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) { 372 c.heapMinimum = defaultHeapMinimum 373 c.triggered = ^uint64(0) 374 c.setGCPercent(gcPercent) 375 c.setMemoryLimit(memoryLimit) 376 c.commit(true) // No sweep phase in the first GC cycle. 377 // N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at 378 // initialization time. 379 // N.B. No need to call revise; there's no GC enabled during 380 // initialization. 381 } 382 383 // startCycle resets the GC controller's state and computes estimates 384 // for a new GC cycle. The caller must hold worldsema and the world 385 // must be stopped. 386 func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) { 387 c.heapScanWork.Store(0) 388 c.stackScanWork.Store(0) 389 c.globalsScanWork.Store(0) 390 c.bgScanCredit.Store(0) 391 c.assistTime.Store(0) 392 c.dedicatedMarkTime.Store(0) 393 c.fractionalMarkTime.Store(0) 394 c.idleMarkTime.Store(0) 395 c.markStartTime = markStartTime 396 c.triggered = c.heapLive.Load() 397 398 // Compute the background mark utilization goal. In general, 399 // this may not come out exactly. We round the number of 400 // dedicated workers so that the utilization is closest to 401 // 25%. For small GOMAXPROCS, this would introduce too much 402 // error, so we add fractional workers in that case. 403 totalUtilizationGoal := float64(procs) * gcBackgroundUtilization 404 dedicatedMarkWorkersNeeded := int64(totalUtilizationGoal + 0.5) 405 utilError := float64(dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1 406 const maxUtilError = 0.3 407 if utilError < -maxUtilError || utilError > maxUtilError { 408 // Rounding put us more than 30% off our goal. With 409 // gcBackgroundUtilization of 25%, this happens for 410 // GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional 411 // workers to compensate. 412 if float64(dedicatedMarkWorkersNeeded) > totalUtilizationGoal { 413 // Too many dedicated workers. 414 dedicatedMarkWorkersNeeded-- 415 } 416 c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(dedicatedMarkWorkersNeeded)) / float64(procs) 417 } else { 418 c.fractionalUtilizationGoal = 0 419 } 420 421 // In STW mode, we just want dedicated workers. 422 if debug.gcstoptheworld > 0 { 423 dedicatedMarkWorkersNeeded = int64(procs) 424 c.fractionalUtilizationGoal = 0 425 } 426 427 // Clear per-P state 428 for _, p := range allp { 429 p.gcAssistTime = 0 430 p.gcFractionalMarkTime.Store(0) 431 } 432 433 if trigger.kind == gcTriggerTime { 434 // During a periodic GC cycle, reduce the number of idle mark workers 435 // required. However, we need at least one dedicated mark worker or 436 // idle GC worker to ensure GC progress in some scenarios (see comment 437 // on maxIdleMarkWorkers). 438 if dedicatedMarkWorkersNeeded > 0 { 439 c.setMaxIdleMarkWorkers(0) 440 } else { 441 // TODO(mknyszek): The fundamental reason why we need this is because 442 // we can't count on the fractional mark worker to get scheduled. 443 // Fix that by ensuring it gets scheduled according to its quota even 444 // if the rest of the application is idle. 445 c.setMaxIdleMarkWorkers(1) 446 } 447 } else { 448 // N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to 449 // change during a GC cycle. 450 c.setMaxIdleMarkWorkers(int32(procs) - int32(dedicatedMarkWorkersNeeded)) 451 } 452 453 // Compute initial values for controls that are updated 454 // throughout the cycle. 455 c.dedicatedMarkWorkersNeeded.Store(dedicatedMarkWorkersNeeded) 456 c.revise() 457 458 if debug.gcpacertrace > 0 { 459 heapGoal := c.heapGoal() 460 assistRatio := c.assistWorkPerByte.Load() 461 print("pacer: assist ratio=", assistRatio, 462 " (scan ", gcController.heapScan.Load()>>20, " MB in ", 463 work.initialHeapLive>>20, "->", 464 heapGoal>>20, " MB)", 465 " workers=", dedicatedMarkWorkersNeeded, 466 "+", c.fractionalUtilizationGoal, "\n") 467 } 468 } 469 470 // revise updates the assist ratio during the GC cycle to account for 471 // improved estimates. This should be called whenever gcController.heapScan, 472 // gcController.heapLive, or if any inputs to gcController.heapGoal are 473 // updated. It is safe to call concurrently, but it may race with other 474 // calls to revise. 475 // 476 // The result of this race is that the two assist ratio values may not line 477 // up or may be stale. In practice this is OK because the assist ratio 478 // moves slowly throughout a GC cycle, and the assist ratio is a best-effort 479 // heuristic anyway. Furthermore, no part of the heuristic depends on 480 // the two assist ratio values being exact reciprocals of one another, since 481 // the two values are used to convert values from different sources. 482 // 483 // The worst case result of this raciness is that we may miss a larger shift 484 // in the ratio (say, if we decide to pace more aggressively against the 485 // hard heap goal) but even this "hard goal" is best-effort (see #40460). 486 // The dedicated GC should ensure we don't exceed the hard goal by too much 487 // in the rare case we do exceed it. 488 // 489 // It should only be called when gcBlackenEnabled != 0 (because this 490 // is when assists are enabled and the necessary statistics are 491 // available). 492 func (c *gcControllerState) revise() { 493 gcPercent := c.gcPercent.Load() 494 if gcPercent < 0 { 495 // If GC is disabled but we're running a forced GC, 496 // act like GOGC is huge for the below calculations. 497 gcPercent = 100000 498 } 499 live := c.heapLive.Load() 500 scan := c.heapScan.Load() 501 work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load() 502 503 // Assume we're under the soft goal. Pace GC to complete at 504 // heapGoal assuming the heap is in steady-state. 505 heapGoal := int64(c.heapGoal()) 506 507 // The expected scan work is computed as the amount of bytes scanned last 508 // GC cycle (both heap and stack), plus our estimate of globals work for this cycle. 509 scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan.Load() + c.globalsScan.Load()) 510 511 // maxScanWork is a worst-case estimate of the amount of scan work that 512 // needs to be performed in this GC cycle. Specifically, it represents 513 // the case where *all* scannable memory turns out to be live, and 514 // *all* allocated stack space is scannable. 515 maxStackScan := c.maxStackScan.Load() 516 maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load()) 517 if work > scanWorkExpected { 518 // We've already done more scan work than expected. Because our expectation 519 // is based on a steady-state scannable heap size, we assume this means our 520 // heap is growing. Compute a new heap goal that takes our existing runway 521 // computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case 522 // scan work. This keeps our assist ratio stable if the heap continues to grow. 523 // 524 // The effect of this mechanism is that assists stay flat in the face of heap 525 // growths. It's OK to use more memory this cycle to scan all the live heap, 526 // because the next GC cycle is inevitably going to use *at least* that much 527 // memory anyway. 528 extHeapGoal := int64(float64(heapGoal-int64(c.triggered))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.triggered) 529 scanWorkExpected = maxScanWork 530 531 // hardGoal is a hard limit on the amount that we're willing to push back the 532 // heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or 533 // stacks and/or globals grow to twice their size, this limits the current GC cycle's 534 // growth to 4x the original live heap's size). 535 // 536 // This maintains the invariant that we use no more memory than the next GC cycle 537 // will anyway. 538 hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal)) 539 if extHeapGoal > hardGoal { 540 extHeapGoal = hardGoal 541 } 542 heapGoal = extHeapGoal 543 } 544 if int64(live) > heapGoal { 545 // We're already past our heap goal, even the extrapolated one. 546 // Leave ourselves some extra runway, so in the worst case we 547 // finish by that point. 548 const maxOvershoot = 1.1 549 heapGoal = int64(float64(heapGoal) * maxOvershoot) 550 551 // Compute the upper bound on the scan work remaining. 552 scanWorkExpected = maxScanWork 553 } 554 555 // Compute the remaining scan work estimate. 556 // 557 // Note that we currently count allocations during GC as both 558 // scannable heap (heapScan) and scan work completed 559 // (scanWork), so allocation will change this difference 560 // slowly in the soft regime and not at all in the hard 561 // regime. 562 scanWorkRemaining := scanWorkExpected - work 563 if scanWorkRemaining < 1000 { 564 // We set a somewhat arbitrary lower bound on 565 // remaining scan work since if we aim a little high, 566 // we can miss by a little. 567 // 568 // We *do* need to enforce that this is at least 1, 569 // since marking is racy and double-scanning objects 570 // may legitimately make the remaining scan work 571 // negative, even in the hard goal regime. 572 scanWorkRemaining = 1000 573 } 574 575 // Compute the heap distance remaining. 576 heapRemaining := heapGoal - int64(live) 577 if heapRemaining <= 0 { 578 // This shouldn't happen, but if it does, avoid 579 // dividing by zero or setting the assist negative. 580 heapRemaining = 1 581 } 582 583 // Compute the mutator assist ratio so by the time the mutator 584 // allocates the remaining heap bytes up to heapGoal, it will 585 // have done (or stolen) the remaining amount of scan work. 586 // Note that the assist ratio values are updated atomically 587 // but not together. This means there may be some degree of 588 // skew between the two values. This is generally OK as the 589 // values shift relatively slowly over the course of a GC 590 // cycle. 591 assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining) 592 assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining) 593 c.assistWorkPerByte.Store(assistWorkPerByte) 594 c.assistBytesPerWork.Store(assistBytesPerWork) 595 } 596 597 // endCycle computes the consMark estimate for the next cycle. 598 // userForced indicates whether the current GC cycle was forced 599 // by the application. 600 func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) { 601 // Record last heap goal for the scavenger. 602 // We'll be updating the heap goal soon. 603 gcController.lastHeapGoal = c.heapGoal() 604 605 // Compute the duration of time for which assists were turned on. 606 assistDuration := now - c.markStartTime 607 608 // Assume background mark hit its utilization goal. 609 utilization := gcBackgroundUtilization 610 // Add assist utilization; avoid divide by zero. 611 if assistDuration > 0 { 612 utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs)) 613 } 614 615 if c.heapLive.Load() <= c.triggered { 616 // Shouldn't happen, but let's be very safe about this in case the 617 // GC is somehow extremely short. 618 // 619 // In this case though, the only reasonable value for c.heapLive-c.triggered 620 // would be 0, which isn't really all that useful, i.e. the GC was so short 621 // that it didn't matter. 622 // 623 // Ignore this case and don't update anything. 624 return 625 } 626 idleUtilization := 0.0 627 if assistDuration > 0 { 628 idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs)) 629 } 630 // Determine the cons/mark ratio. 631 // 632 // The units we want for the numerator and denominator are both B / cpu-ns. 633 // We get this by taking the bytes allocated or scanned, and divide by the amount of 634 // CPU time it took for those operations. For allocations, that CPU time is 635 // 636 // assistDuration * procs * (1 - utilization) 637 // 638 // Where utilization includes just background GC workers and assists. It does *not* 639 // include idle GC work time, because in theory the mutator is free to take that at 640 // any point. 641 // 642 // For scanning, that CPU time is 643 // 644 // assistDuration * procs * (utilization + idleUtilization) 645 // 646 // In this case, we *include* idle utilization, because that is additional CPU time that 647 // the GC had available to it. 648 // 649 // In effect, idle GC time is sort of double-counted here, but it's very weird compared 650 // to other kinds of GC work, because of how fluid it is. Namely, because the mutator is 651 // *always* free to take it. 652 // 653 // So this calculation is really: 654 // (heapLive-trigger) / (assistDuration * procs * (1-utilization)) / 655 // (scanWork) / (assistDuration * procs * (utilization+idleUtilization)) 656 // 657 // Note that because we only care about the ratio, assistDuration and procs cancel out. 658 scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load() 659 currentConsMark := (float64(c.heapLive.Load()-c.triggered) * (utilization + idleUtilization)) / 660 (float64(scanWork) * (1 - utilization)) 661 662 // Update our cons/mark estimate. This is the maximum of the value we just computed and the last 663 // 4 cons/mark values we measured. The reason we take the maximum here is to bias a noisy 664 // cons/mark measurement toward fewer assists at the expense of additional GC cycles (starting 665 // earlier). 666 oldConsMark := c.consMark 667 c.consMark = currentConsMark 668 for i := range c.lastConsMark { 669 if c.lastConsMark[i] > c.consMark { 670 c.consMark = c.lastConsMark[i] 671 } 672 } 673 copy(c.lastConsMark[:], c.lastConsMark[1:]) 674 c.lastConsMark[len(c.lastConsMark)-1] = currentConsMark 675 676 if debug.gcpacertrace > 0 { 677 printlock() 678 goal := gcGoalUtilization * 100 679 print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ") 680 print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ") 681 live := c.heapLive.Load() 682 print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")") 683 println() 684 printunlock() 685 } 686 } 687 688 // enlistWorker encourages another dedicated mark worker to start on 689 // another P if there are spare worker slots. It is used by putfull 690 // when more work is made available. 691 // 692 // If goexperiment.GreenTeaGC, the caller must not hold a G's scan bit, 693 // otherwise this could cause a deadlock. This is already enforced by 694 // the static lock ranking. 695 // 696 //go:nowritebarrier 697 func (c *gcControllerState) enlistWorker() { 698 needDedicated := c.dedicatedMarkWorkersNeeded.Load() > 0 699 700 // Create new workers from idle Ps with goexperiment.GreenTeaGC. 701 // 702 // Note: with Green Tea, this places a requirement on enlistWorker 703 // that it must not be called while a G's scan bit is held. 704 if goexperiment.GreenTeaGC { 705 needIdle := c.needIdleMarkWorker() 706 707 // If we're all full on dedicated and idle workers, nothing 708 // to do. 709 if !needDedicated && !needIdle { 710 return 711 } 712 713 // If there are idle Ps, wake one so it will run a worker 714 // (the scheduler will already prefer to spin up a new 715 // dedicated worker over an idle one). 716 if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 { 717 wakep() // Likely to consume our worker request. 718 return 719 } 720 } 721 722 // If we still need more dedicated workers, try to preempt a running P 723 // so it will switch to a worker. 724 if !needDedicated { 725 return 726 } 727 728 // Pick a random other P to preempt. 729 if gomaxprocs <= 1 { 730 return 731 } 732 gp := getg() 733 if gp == nil || gp.m == nil || gp.m.p == 0 { 734 return 735 } 736 myID := gp.m.p.ptr().id 737 for tries := 0; tries < 5; tries++ { 738 id := int32(cheaprandn(uint32(gomaxprocs - 1))) 739 if id >= myID { 740 id++ 741 } 742 p := allp[id] 743 if p.status != _Prunning { 744 continue 745 } 746 if preemptone(p) { 747 return 748 } 749 } 750 } 751 752 // assignWaitingGCWorker assigns a background mark worker to pp if one should 753 // be run. 754 // 755 // If a worker is selected, it is assigned to pp.nextMarkGCWorker and the P is 756 // wired as a GC mark worker. The G is still in _Gwaiting. If no worker is 757 // selected, ok returns false. 758 // 759 // If assignedWaitingGCWorker returns true, this P must either: 760 // - Mark the G as runnable and run it, clearing pp.nextMarkGCWorker. 761 // - Or, call c.releaseNextGCMarkWorker. 762 // 763 // This must only be called when gcBlackenEnabled != 0. 764 func (c *gcControllerState) assignWaitingGCWorker(pp *p, now int64) (bool, int64) { 765 if gcBlackenEnabled == 0 { 766 throw("gcControllerState.findRunnable: blackening not enabled") 767 } 768 769 if now == 0 { 770 now = nanotime() 771 } 772 773 if !gcShouldScheduleWorker(pp) { 774 // No good reason to schedule a worker. This can happen at 775 // the end of the mark phase when there are still 776 // assists tapering off. Don't bother running a worker 777 // now because it'll just return immediately. 778 return false, now 779 } 780 781 if c.dedicatedMarkWorkersNeeded.Load() <= 0 && c.fractionalUtilizationGoal == 0 { 782 // No current need for dedicated workers, and no need at all for 783 // fractional workers. Check before trying to acquire a worker; when 784 // GOMAXPROCS is large, that can be expensive and is often unnecessary. 785 // 786 // When a dedicated worker stops running, the gcBgMarkWorker loop notes 787 // the need for the worker before returning it to the pool. If we don't 788 // see the need now, we wouldn't have found it in the pool anyway. 789 return false, now 790 } 791 792 // Grab a worker before we commit to running below. 793 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) 794 if node == nil { 795 // There is at least one worker per P, so normally there are 796 // enough workers to run on all Ps, if necessary. However, once 797 // a worker enters gcMarkDone it may park without rejoining the 798 // pool, thus freeing a P with no corresponding worker. 799 // gcMarkDone never depends on another worker doing work, so it 800 // is safe to simply do nothing here. 801 // 802 // If gcMarkDone bails out without completing the mark phase, 803 // it will always do so with queued global work. Thus, that P 804 // will be immediately eligible to re-run the worker G it was 805 // just using, ensuring work can complete. 806 return false, now 807 } 808 809 decIfPositive := func(val *atomic.Int64) bool { 810 for { 811 v := val.Load() 812 if v <= 0 { 813 return false 814 } 815 816 if val.CompareAndSwap(v, v-1) { 817 return true 818 } 819 } 820 } 821 822 if decIfPositive(&c.dedicatedMarkWorkersNeeded) { 823 // This P is now dedicated to marking until the end of 824 // the concurrent mark phase. 825 pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode 826 } else if c.fractionalUtilizationGoal == 0 { 827 // No need for fractional workers. 828 gcBgMarkWorkerPool.push(&node.node) 829 return false, now 830 } else { 831 // Is this P behind on the fractional utilization 832 // goal? 833 // 834 // This should be kept in sync with pollFractionalWorkerExit. 835 delta := now - c.markStartTime 836 if delta > 0 && float64(pp.gcFractionalMarkTime.Load())/float64(delta) > c.fractionalUtilizationGoal { 837 // Nope. No need to run a fractional worker. 838 gcBgMarkWorkerPool.push(&node.node) 839 return false, now 840 } 841 // Run a fractional worker. 842 pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode 843 } 844 845 pp.nextGCMarkWorker = node 846 return true, now 847 } 848 849 // findRunnableGCWorker returns a background mark worker for pp if it 850 // should be run. 851 // 852 // If findRunnableGCWorker returns a G, this P is wired as a GC mark worker and 853 // must run the G. 854 // 855 // This must only be called when gcBlackenEnabled != 0. 856 // 857 // This function is allowed to have write barriers because it is called from 858 // the portion of findRunnable that always has a P. 859 // 860 //go:yeswritebarrierrec 861 func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { 862 // Since we have the current time, check if the GC CPU limiter 863 // hasn't had an update in a while. This check is necessary in 864 // case the limiter is on but hasn't been checked in a while and 865 // so may have left sufficient headroom to turn off again. 866 if now == 0 { 867 now = nanotime() 868 } 869 if gcCPULimiter.needUpdate(now) { 870 gcCPULimiter.update(now) 871 } 872 873 // If a worker wasn't already assigned by procresize, assign one now. 874 if pp.nextGCMarkWorker == nil { 875 ok, now := c.assignWaitingGCWorker(pp, now) 876 if !ok { 877 return nil, now 878 } 879 } 880 881 node := pp.nextGCMarkWorker 882 pp.nextGCMarkWorker = nil 883 884 // Run the background mark worker. 885 gp := node.gp.ptr() 886 trace := traceAcquire() 887 casgstatus(gp, _Gwaiting, _Grunnable) 888 if trace.ok() { 889 trace.GoUnpark(gp, 0) 890 traceRelease(trace) 891 } 892 return gp, now 893 } 894 895 // Release an unused pp.nextGCMarkWorker, if any. 896 // 897 // This function is allowed to have write barriers because it is called from 898 // the portion of schedule. 899 // 900 //go:yeswritebarrierrec 901 func (c *gcControllerState) releaseNextGCMarkWorker(pp *p) { 902 node := pp.nextGCMarkWorker 903 if node == nil { 904 return 905 } 906 907 c.markWorkerStop(pp.gcMarkWorkerMode, 0) 908 gcBgMarkWorkerPool.push(&node.node) 909 pp.nextGCMarkWorker = nil 910 } 911 912 // resetLive sets up the controller state for the next mark phase after the end 913 // of the previous one. Must be called after endCycle and before commit, before 914 // the world is started. 915 // 916 // The world must be stopped. 917 func (c *gcControllerState) resetLive(bytesMarked uint64) { 918 c.heapMarked = bytesMarked 919 c.heapLive.Store(bytesMarked) 920 c.heapScan.Store(uint64(c.heapScanWork.Load())) 921 c.lastHeapScan = uint64(c.heapScanWork.Load()) 922 c.lastStackScan.Store(uint64(c.stackScanWork.Load())) 923 c.triggered = ^uint64(0) // Reset triggered. 924 925 // heapLive was updated, so emit a trace event. 926 trace := traceAcquire() 927 if trace.ok() { 928 trace.HeapAlloc(bytesMarked) 929 traceRelease(trace) 930 } 931 } 932 933 // markWorkerStop must be called whenever a mark worker stops executing. 934 // 935 // It updates mark work accounting in the controller by a duration of 936 // work in nanoseconds and other bookkeeping. 937 // 938 // Safe to execute at any time. 939 func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) { 940 switch mode { 941 case gcMarkWorkerDedicatedMode: 942 c.dedicatedMarkTime.Add(duration) 943 c.dedicatedMarkWorkersNeeded.Add(1) 944 case gcMarkWorkerFractionalMode: 945 c.fractionalMarkTime.Add(duration) 946 case gcMarkWorkerIdleMode: 947 c.idleMarkTime.Add(duration) 948 c.removeIdleMarkWorker() 949 default: 950 throw("markWorkerStop: unknown mark worker mode") 951 } 952 } 953 954 func (c *gcControllerState) update(dHeapLive, dHeapScan int64) { 955 if dHeapLive != 0 { 956 trace := traceAcquire() 957 live := gcController.heapLive.Add(dHeapLive) 958 if trace.ok() { 959 // gcController.heapLive changed. 960 trace.HeapAlloc(live) 961 traceRelease(trace) 962 } 963 } 964 if gcBlackenEnabled == 0 { 965 // Update heapScan when we're not in a current GC. It is fixed 966 // at the beginning of a cycle. 967 if dHeapScan != 0 { 968 gcController.heapScan.Add(dHeapScan) 969 } 970 } else { 971 // gcController.heapLive changed. 972 c.revise() 973 } 974 } 975 976 func (c *gcControllerState) addScannableStack(pp *p, amount int64) { 977 if pp == nil { 978 c.maxStackScan.Add(amount) 979 return 980 } 981 pp.maxStackScanDelta += amount 982 if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack { 983 c.maxStackScan.Add(pp.maxStackScanDelta) 984 pp.maxStackScanDelta = 0 985 } 986 } 987 988 func (c *gcControllerState) addGlobals(amount int64) { 989 c.globalsScan.Add(amount) 990 } 991 992 // heapGoal returns the current heap goal. 993 func (c *gcControllerState) heapGoal() uint64 { 994 goal, _ := c.heapGoalInternal() 995 return goal 996 } 997 998 // heapGoalInternal is the implementation of heapGoal which returns additional 999 // information that is necessary for computing the trigger. 1000 // 1001 // The returned minTrigger is always <= goal. 1002 func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) { 1003 // Start with the goal calculated for gcPercent. 1004 goal = c.gcPercentHeapGoal.Load() 1005 1006 // Check if the memory-limit-based goal is smaller, and if so, pick that. 1007 if newGoal := c.memoryLimitHeapGoal(); newGoal < goal { 1008 goal = newGoal 1009 } else { 1010 // We're not limited by the memory limit goal, so perform a series of 1011 // adjustments that might move the goal forward in a variety of circumstances. 1012 1013 sweepDistTrigger := c.sweepDistMinTrigger.Load() 1014 if sweepDistTrigger > goal { 1015 // Set the goal to maintain a minimum sweep distance since 1016 // the last call to commit. Note that we never want to do this 1017 // if we're in the memory limit regime, because it could push 1018 // the goal up. 1019 goal = sweepDistTrigger 1020 } 1021 // Since we ignore the sweep distance trigger in the memory 1022 // limit regime, we need to ensure we don't propagate it to 1023 // the trigger, because it could cause a violation of the 1024 // invariant that the trigger < goal. 1025 minTrigger = sweepDistTrigger 1026 1027 // Ensure that the heap goal is at least a little larger than 1028 // the point at which we triggered. This may not be the case if GC 1029 // start is delayed or if the allocation that pushed gcController.heapLive 1030 // over trigger is large or if the trigger is really close to 1031 // GOGC. Assist is proportional to this distance, so enforce a 1032 // minimum distance, even if it means going over the GOGC goal 1033 // by a tiny bit. 1034 // 1035 // Ignore this if we're in the memory limit regime: we'd prefer to 1036 // have the GC respond hard about how close we are to the goal than to 1037 // push the goal back in such a manner that it could cause us to exceed 1038 // the memory limit. 1039 const minRunway = 64 << 10 1040 if c.triggered != ^uint64(0) && goal < c.triggered+minRunway { 1041 goal = c.triggered + minRunway 1042 } 1043 } 1044 return 1045 } 1046 1047 // memoryLimitHeapGoal returns a heap goal derived from memoryLimit. 1048 func (c *gcControllerState) memoryLimitHeapGoal() uint64 { 1049 // Start by pulling out some values we'll need. Be careful about overflow. 1050 var heapFree, heapAlloc, mappedReady uint64 1051 for { 1052 heapFree = c.heapFree.load() // Free and unscavenged memory. 1053 heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use. 1054 mappedReady = c.mappedReady.Load() // Total unreleased mapped memory. 1055 if heapFree+heapAlloc <= mappedReady { 1056 break 1057 } 1058 // It is impossible for total unreleased mapped memory to exceed heap memory, but 1059 // because these stats are updated independently, we may observe a partial update 1060 // including only some values. Thus, we appear to break the invariant. However, 1061 // this condition is necessarily transient, so just try again. In the case of a 1062 // persistent accounting error, we'll deadlock here. 1063 } 1064 1065 // Below we compute a goal from memoryLimit. There are a few things to be aware of. 1066 // Firstly, the memoryLimit does not easily compare to the heap goal: the former 1067 // is total mapped memory by the runtime that hasn't been released, while the latter is 1068 // only heap object memory. Intuitively, the way we convert from one to the other is to 1069 // subtract everything from memoryLimit that both contributes to the memory limit (so, 1070 // ignore scavenged memory) and doesn't contain heap objects. This isn't quite what 1071 // lines up with reality, but it's a good starting point. 1072 // 1073 // In practice this computation looks like the following: 1074 // 1075 // goal := memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0)) 1076 // ^1 ^2 1077 // goal -= goal / 100 * memoryLimitHeapGoalHeadroomPercent 1078 // ^3 1079 // 1080 // Let's break this down. 1081 // 1082 // The first term (marker 1) is everything that contributes to the memory limit and isn't 1083 // or couldn't become heap objects. It represents, broadly speaking, non-heap overheads. 1084 // One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged 1085 // memory that may contain heap objects in the future. 1086 // 1087 // Let's take a step back. In an ideal world, this term would look something like just 1088 // the heap goal. That is, we "reserve" enough space for the heap to grow to the heap 1089 // goal, and subtract out everything else. This is of course impossible; the definition 1090 // is circular! However, this impossible definition contains a key insight: the amount 1091 // we're *going* to use matters just as much as whatever we're currently using. 1092 // 1093 // Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and 1094 // unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free 1095 // and unscavenged memory, pushing the goal down significantly. 1096 // 1097 // heapFree is also safe to exclude from the memory limit because in the steady-state, it's 1098 // just a pool of memory for future heap allocations, and making new allocations from heapFree 1099 // memory doesn't increase overall memory use. In transient states, the scavenger and the 1100 // allocator actively manage the pool of heapFree memory to maintain the memory limit. 1101 // 1102 // The second term (marker 2) is the amount of memory we've exceeded the limit by, and is 1103 // intended to help recover from such a situation. By pushing the heap goal down, we also 1104 // push the trigger down, triggering and finishing a GC sooner in order to make room for 1105 // other memory sources. Note that since we're effectively reducing the heap goal by X bytes, 1106 // we're actually giving more than X bytes of headroom back, because the heap goal is in 1107 // terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store 1108 // X bytes worth of objects. 1109 // 1110 // The final adjustment (marker 3) reduces the maximum possible memory limit heap goal by 1111 // memoryLimitHeapGoalPercent. As the name implies, this is to provide additional headroom in 1112 // the face of pacing inaccuracies, and also to leave a buffer of unscavenged memory so the 1113 // allocator isn't constantly scavenging. The reduction amount also has a fixed minimum 1114 // (memoryLimitMinHeapGoalHeadroom, not pictured) because the aforementioned pacing inaccuracies 1115 // disproportionately affect small heaps: as heaps get smaller, the pacer's inputs get fuzzier. 1116 // Shorter GC cycles and less GC work means noisy external factors like the OS scheduler have a 1117 // greater impact. 1118 1119 memoryLimit := uint64(c.memoryLimit.Load()) 1120 1121 // Compute term 1. 1122 nonHeapMemory := mappedReady - heapFree - heapAlloc 1123 1124 // Compute term 2. 1125 var overage uint64 1126 if mappedReady > memoryLimit { 1127 overage = mappedReady - memoryLimit 1128 } 1129 1130 if nonHeapMemory+overage >= memoryLimit { 1131 // We're at a point where non-heap memory exceeds the memory limit on its own. 1132 // There's honestly not much we can do here but just trigger GCs continuously 1133 // and let the CPU limiter reign that in. Something has to give at this point. 1134 // Set it to heapMarked, the lowest possible goal. 1135 return c.heapMarked 1136 } 1137 1138 // Compute the goal. 1139 goal := memoryLimit - (nonHeapMemory + overage) 1140 1141 // Apply some headroom to the goal to account for pacing inaccuracies and to reduce 1142 // the impact of scavenging at allocation time in response to a high allocation rate 1143 // when GOGC=off. See issue #57069. Also, be careful about small limits. 1144 headroom := goal / 100 * memoryLimitHeapGoalHeadroomPercent 1145 if headroom < memoryLimitMinHeapGoalHeadroom { 1146 // Set a fixed minimum to deal with the particularly large effect pacing inaccuracies 1147 // have for smaller heaps. 1148 headroom = memoryLimitMinHeapGoalHeadroom 1149 } 1150 if goal < headroom || goal-headroom < headroom { 1151 goal = headroom 1152 } else { 1153 goal = goal - headroom 1154 } 1155 // Don't let us go below the live heap. A heap goal below the live heap doesn't make sense. 1156 if goal < c.heapMarked { 1157 goal = c.heapMarked 1158 } 1159 return goal 1160 } 1161 1162 const ( 1163 // These constants determine the bounds on the GC trigger as a fraction 1164 // of heap bytes allocated between the start of a GC (heapLive == heapMarked) 1165 // and the end of a GC (heapLive == heapGoal). 1166 // 1167 // The constants are obscured in this way for efficiency. The denominator 1168 // of the fraction is always a power-of-two for a quick division, so that 1169 // the numerator is a single constant integer multiplication. 1170 triggerRatioDen = 64 1171 1172 // The minimum trigger constant was chosen empirically: given a sufficiently 1173 // fast/scalable allocator with 48 Ps that could drive the trigger ratio 1174 // to <0.05, this constant causes applications to retain the same peak 1175 // RSS compared to not having this allocator. 1176 minTriggerRatioNum = 45 // ~0.7 1177 1178 // The maximum trigger constant is chosen somewhat arbitrarily, but the 1179 // current constant has served us well over the years. 1180 maxTriggerRatioNum = 61 // ~0.95 1181 ) 1182 1183 // trigger returns the current point at which a GC should trigger along with 1184 // the heap goal. 1185 // 1186 // The returned value may be compared against heapLive to determine whether 1187 // the GC should trigger. Thus, the GC trigger condition should be (but may 1188 // not be, in the case of small movements for efficiency) checked whenever 1189 // the heap goal may change. 1190 func (c *gcControllerState) trigger() (uint64, uint64) { 1191 goal, minTrigger := c.heapGoalInternal() 1192 1193 // Invariant: the trigger must always be less than the heap goal. 1194 // 1195 // Note that the memory limit sets a hard maximum on our heap goal, 1196 // but the live heap may grow beyond it. 1197 1198 if c.heapMarked >= goal { 1199 // The goal should never be smaller than heapMarked, but let's be 1200 // defensive about it. The only reasonable trigger here is one that 1201 // causes a continuous GC cycle at heapMarked, but respect the goal 1202 // if it came out as smaller than that. 1203 return goal, goal 1204 } 1205 1206 // Below this point, c.heapMarked < goal. 1207 1208 // heapMarked is our absolute minimum, and it's possible the trigger 1209 // bound we get from heapGoalinternal is less than that. 1210 if minTrigger < c.heapMarked { 1211 minTrigger = c.heapMarked 1212 } 1213 1214 // If we let the trigger go too low, then if the application 1215 // is allocating very rapidly we might end up in a situation 1216 // where we're allocating black during a nearly always-on GC. 1217 // The result of this is a growing heap and ultimately an 1218 // increase in RSS. By capping us at a point >0, we're essentially 1219 // saying that we're OK using more CPU during the GC to prevent 1220 // this growth in RSS. 1221 triggerLowerBound := ((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum + c.heapMarked 1222 if minTrigger < triggerLowerBound { 1223 minTrigger = triggerLowerBound 1224 } 1225 1226 // For small heaps, set the max trigger point at maxTriggerRatio of the way 1227 // from the live heap to the heap goal. This ensures we always have *some* 1228 // headroom when the GC actually starts. For larger heaps, set the max trigger 1229 // point at the goal, minus the minimum heap size. 1230 // 1231 // This choice follows from the fact that the minimum heap size is chosen 1232 // to reflect the costs of a GC with no work to do. With a large heap but 1233 // very little scan work to perform, this gives us exactly as much runway 1234 // as we would need, in the worst case. 1235 maxTrigger := ((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum + c.heapMarked 1236 if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger { 1237 maxTrigger = goal - defaultHeapMinimum 1238 } 1239 maxTrigger = max(maxTrigger, minTrigger) 1240 1241 // Compute the trigger from our bounds and the runway stored by commit. 1242 var trigger uint64 1243 runway := c.runway.Load() 1244 if runway > goal { 1245 trigger = minTrigger 1246 } else { 1247 trigger = goal - runway 1248 } 1249 trigger = max(trigger, minTrigger) 1250 trigger = min(trigger, maxTrigger) 1251 if trigger > goal { 1252 print("trigger=", trigger, " heapGoal=", goal, "\n") 1253 print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n") 1254 throw("produced a trigger greater than the heap goal") 1255 } 1256 return trigger, goal 1257 } 1258 1259 // commit recomputes all pacing parameters needed to derive the 1260 // trigger and the heap goal. Namely, the gcPercent-based heap goal, 1261 // and the amount of runway we want to give the GC this cycle. 1262 // 1263 // This can be called any time. If GC is the in the middle of a 1264 // concurrent phase, it will adjust the pacing of that phase. 1265 // 1266 // isSweepDone should be the result of calling isSweepDone(), 1267 // unless we're testing or we know we're executing during a GC cycle. 1268 // 1269 // This depends on gcPercent, gcController.heapMarked, and 1270 // gcController.heapLive. These must be up to date. 1271 // 1272 // Callers must call gcControllerState.revise after calling this 1273 // function if the GC is enabled. 1274 // 1275 // mheap_.lock must be held or the world must be stopped. 1276 func (c *gcControllerState) commit(isSweepDone bool) { 1277 if !c.test { 1278 assertWorldStoppedOrLockHeld(&mheap_.lock) 1279 } 1280 1281 if isSweepDone { 1282 // The sweep is done, so there aren't any restrictions on the trigger 1283 // we need to think about. 1284 c.sweepDistMinTrigger.Store(0) 1285 } else { 1286 // Concurrent sweep happens in the heap growth 1287 // from gcController.heapLive to trigger. Make sure we 1288 // give the sweeper some runway if it doesn't have enough. 1289 c.sweepDistMinTrigger.Store(c.heapLive.Load() + sweepMinHeapDistance) 1290 } 1291 1292 // Compute the next GC goal, which is when the allocated heap 1293 // has grown by GOGC/100 over where it started the last cycle, 1294 // plus additional runway for non-heap sources of GC work. 1295 gcPercentHeapGoal := ^uint64(0) 1296 if gcPercent := c.gcPercent.Load(); gcPercent >= 0 { 1297 gcPercentHeapGoal = c.heapMarked + (c.heapMarked+c.lastStackScan.Load()+c.globalsScan.Load())*uint64(gcPercent)/100 1298 } 1299 // Apply the minimum heap size here. It's defined in terms of gcPercent 1300 // and is only updated by functions that call commit. 1301 if gcPercentHeapGoal < c.heapMinimum { 1302 gcPercentHeapGoal = c.heapMinimum 1303 } 1304 c.gcPercentHeapGoal.Store(gcPercentHeapGoal) 1305 1306 // Compute the amount of runway we want the GC to have by using our 1307 // estimate of the cons/mark ratio. 1308 // 1309 // The idea is to take our expected scan work, and multiply it by 1310 // the cons/mark ratio to determine how long it'll take to complete 1311 // that scan work in terms of bytes allocated. This gives us our GC's 1312 // runway. 1313 // 1314 // However, the cons/mark ratio is a ratio of rates per CPU-second, but 1315 // here we care about the relative rates for some division of CPU 1316 // resources among the mutator and the GC. 1317 // 1318 // To summarize, we have B / cpu-ns, and we want B / ns. We get that 1319 // by multiplying by our desired division of CPU resources. We choose 1320 // to express CPU resources as GOMAPROCS*fraction. Note that because 1321 // we're working with a ratio here, we can omit the number of CPU cores, 1322 // because they'll appear in the numerator and denominator and cancel out. 1323 // As a result, this is basically just "weighing" the cons/mark ratio by 1324 // our desired division of resources. 1325 // 1326 // Furthermore, by setting the runway so that CPU resources are divided 1327 // this way, assuming that the cons/mark ratio is correct, we make that 1328 // division a reality. 1329 c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load()))) 1330 } 1331 1332 // setGCPercent updates gcPercent. commit must be called after. 1333 // Returns the old value of gcPercent. 1334 // 1335 // The world must be stopped, or mheap_.lock must be held. 1336 func (c *gcControllerState) setGCPercent(in int32) int32 { 1337 if !c.test { 1338 assertWorldStoppedOrLockHeld(&mheap_.lock) 1339 } 1340 1341 out := c.gcPercent.Load() 1342 if in < 0 { 1343 in = -1 1344 } 1345 c.heapMinimum = defaultHeapMinimum * uint64(in) / 100 1346 c.gcPercent.Store(in) 1347 1348 return out 1349 } 1350 1351 //go:linkname setGCPercent runtime/debug.setGCPercent 1352 func setGCPercent(in int32) (out int32) { 1353 // Run on the system stack since we grab the heap lock. 1354 systemstack(func() { 1355 lock(&mheap_.lock) 1356 out = gcController.setGCPercent(in) 1357 gcControllerCommit() 1358 unlock(&mheap_.lock) 1359 }) 1360 1361 // If we just disabled GC, wait for any concurrent GC mark to 1362 // finish so we always return with no GC running. 1363 if in < 0 { 1364 gcWaitOnMark(work.cycles.Load()) 1365 } 1366 1367 return out 1368 } 1369 1370 func readGOGC() int32 { 1371 p := gogetenv("GOGC") 1372 if p == "off" { 1373 return -1 1374 } 1375 if n, err := strconv.ParseInt(p, 10, 32); err == nil { 1376 return int32(n) 1377 } 1378 return 100 1379 } 1380 1381 // setMemoryLimit updates memoryLimit. commit must be called after 1382 // Returns the old value of memoryLimit. 1383 // 1384 // The world must be stopped, or mheap_.lock must be held. 1385 func (c *gcControllerState) setMemoryLimit(in int64) int64 { 1386 if !c.test { 1387 assertWorldStoppedOrLockHeld(&mheap_.lock) 1388 } 1389 1390 out := c.memoryLimit.Load() 1391 if in >= 0 { 1392 c.memoryLimit.Store(in) 1393 } 1394 1395 return out 1396 } 1397 1398 //go:linkname setMemoryLimit runtime/debug.setMemoryLimit 1399 func setMemoryLimit(in int64) (out int64) { 1400 // Run on the system stack since we grab the heap lock. 1401 systemstack(func() { 1402 lock(&mheap_.lock) 1403 out = gcController.setMemoryLimit(in) 1404 if in < 0 || out == in { 1405 // If we're just checking the value or not changing 1406 // it, there's no point in doing the rest. 1407 unlock(&mheap_.lock) 1408 return 1409 } 1410 gcControllerCommit() 1411 unlock(&mheap_.lock) 1412 }) 1413 return out 1414 } 1415 1416 func readGOMEMLIMIT() int64 { 1417 p := gogetenv("GOMEMLIMIT") 1418 if p == "" || p == "off" { 1419 return math.MaxInt64 1420 } 1421 n, ok := parseByteCount(p) 1422 if !ok { 1423 print("GOMEMLIMIT=", p, "\n") 1424 throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`") 1425 } 1426 return n 1427 } 1428 1429 // addIdleMarkWorker attempts to add a new idle mark worker. 1430 // 1431 // If this returns true, the caller must become an idle mark worker unless 1432 // there's no background mark worker goroutines in the pool. This case is 1433 // harmless because there are already background mark workers running. 1434 // If this returns false, the caller must NOT become an idle mark worker. 1435 // 1436 // nosplit because it may be called without a P. 1437 // 1438 //go:nosplit 1439 func (c *gcControllerState) addIdleMarkWorker() bool { 1440 for { 1441 old := c.idleMarkWorkers.Load() 1442 n, max := int32(old&uint64(^uint32(0))), int32(old>>32) 1443 if n >= max { 1444 // See the comment on idleMarkWorkers for why 1445 // n > max is tolerated. 1446 return false 1447 } 1448 if n < 0 { 1449 print("n=", n, " max=", max, "\n") 1450 throw("negative idle mark workers") 1451 } 1452 new := uint64(uint32(n+1)) | (uint64(max) << 32) 1453 if c.idleMarkWorkers.CompareAndSwap(old, new) { 1454 return true 1455 } 1456 } 1457 } 1458 1459 // needIdleMarkWorker is a hint as to whether another idle mark worker is needed. 1460 // 1461 // The caller must still call addIdleMarkWorker to become one. This is mainly 1462 // useful for a quick check before an expensive operation. 1463 // 1464 // nosplit because it may be called without a P. 1465 // 1466 //go:nosplit 1467 func (c *gcControllerState) needIdleMarkWorker() bool { 1468 p := c.idleMarkWorkers.Load() 1469 n, max := int32(p&uint64(^uint32(0))), int32(p>>32) 1470 return n < max 1471 } 1472 1473 // removeIdleMarkWorker must be called when a new idle mark worker stops executing. 1474 func (c *gcControllerState) removeIdleMarkWorker() { 1475 for { 1476 old := c.idleMarkWorkers.Load() 1477 n, max := int32(old&uint64(^uint32(0))), int32(old>>32) 1478 if n-1 < 0 { 1479 print("n=", n, " max=", max, "\n") 1480 throw("negative idle mark workers") 1481 } 1482 new := uint64(uint32(n-1)) | (uint64(max) << 32) 1483 if c.idleMarkWorkers.CompareAndSwap(old, new) { 1484 return 1485 } 1486 } 1487 } 1488 1489 // setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed. 1490 // 1491 // This method is optimistic in that it does not wait for the number of 1492 // idle mark workers to reduce to max before returning; it assumes the workers 1493 // will deschedule themselves. 1494 func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) { 1495 for { 1496 old := c.idleMarkWorkers.Load() 1497 n := int32(old & uint64(^uint32(0))) 1498 if n < 0 { 1499 print("n=", n, " max=", max, "\n") 1500 throw("negative idle mark workers") 1501 } 1502 new := uint64(uint32(n)) | (uint64(max) << 32) 1503 if c.idleMarkWorkers.CompareAndSwap(old, new) { 1504 return 1505 } 1506 } 1507 } 1508 1509 // gcControllerCommit is gcController.commit, but passes arguments from live 1510 // (non-test) data. It also updates any consumers of the GC pacing, such as 1511 // sweep pacing and the background scavenger. 1512 // 1513 // Calls gcController.commit. 1514 // 1515 // The heap lock must be held, so this must be executed on the system stack. 1516 // 1517 //go:systemstack 1518 func gcControllerCommit() { 1519 assertWorldStoppedOrLockHeld(&mheap_.lock) 1520 1521 gcController.commit(isSweepDone()) 1522 1523 // Update mark pacing. 1524 if gcphase != _GCoff { 1525 gcController.revise() 1526 } 1527 1528 // TODO(mknyszek): This isn't really accurate any longer because the heap 1529 // goal is computed dynamically. Still useful to snapshot, but not as useful. 1530 trace := traceAcquire() 1531 if trace.ok() { 1532 trace.HeapGoal() 1533 traceRelease(trace) 1534 } 1535 1536 trigger, heapGoal := gcController.trigger() 1537 gcPaceSweeper(trigger) 1538 gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal) 1539 } 1540