Source file src/runtime/malloc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
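//
// For example, on a 64-bit non-Windows platform (64MB arenas, so
// heapArenaBytes = 1<<26), a hypothetical heap address such as
// 0x00c000523000 lies in the arena based at
// 0x00c000523000 &^ (heapArenaBytes-1) = 0x00c000000000, and every
// address in [0x00c000000000, 0x00c004000000) shares that arena's
// heapArena metadata.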
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of an "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures, the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.

package runtime

import (
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

const (
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index. In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
169 // 170 // ppc64, mips64, and s390x support arbitrary 64 bit addresses 171 // in hardware. On Linux, Go leans on stricter OS limits. Based 172 // on Linux's processor.h, the user address space is limited as 173 // follows on 64-bit architectures: 174 // 175 // Architecture Name Maximum Value (exclusive) 176 // --------------------------------------------------------------------- 177 // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses) 178 // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses) 179 // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses) 180 // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses) 181 // s390x TASK_SIZE 1<<64 (64 bit addresses) 182 // 183 // These limits may increase over time, but are currently at 184 // most 48 bits except on s390x. On all architectures, Linux 185 // starts placing mmap'd regions at addresses that are 186 // significantly below 48 bits, so even if it's possible to 187 // exceed Go's 48 bit limit, it's extremely unlikely in 188 // practice. 189 // 190 // On 32-bit platforms, we accept the full 32-bit address 191 // space because doing so is cheap. 192 // mips32 only has access to the low 2GB of virtual memory, so 193 // we further limit it to 31 bits. 194 // 195 // On ios/arm64, although 64-bit pointers are presumably 196 // available, pointers are truncated to 33 bits in iOS <14. 197 // Furthermore, only the top 4 GiB of the address space are 198 // actually available to the application. In iOS >=14, more 199 // of the address space is available, and the OS can now 200 // provide addresses outside of those 33 bits. Pick 40 bits 201 // as a reasonable balance between address space usage by the 202 // page allocator, and flexibility for what mmap'd regions 203 // we'll accept for the heap. We can't just move to the full 204 // 48 bits because this uses too much address space for older 205 // iOS versions. 206 // TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64 207 // to a 48-bit address space like every other arm64 platform. 208 // 209 // WebAssembly currently has a limit of 4GB linear memory. 210 heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64 211 212 // maxAlloc is the maximum size of an allocation. On 64-bit, 213 // it's theoretically possible to allocate 1<<heapAddrBits bytes. On 214 // 32-bit, however, this is one less than 1<<32 because the 215 // number of bytes in the address space doesn't actually fit 216 // in a uintptr. 217 maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1 218 219 // The number of bits in a heap address, the size of heap 220 // arenas, and the L1 and L2 arena map sizes are related by 221 // 222 // (1 << addr bits) = arena size * L1 entries * L2 entries 223 // 224 // Currently, we balance these as follows: 225 // 226 // Platform Addr bits Arena size L1 entries L2 entries 227 // -------------- --------- ---------- ---------- ----------- 228 // */64-bit 48 64MB 1 4M (32MB) 229 // windows/64-bit 48 4MB 64 1M (8MB) 230 // ios/arm64 40 4MB 1 256K (2MB) 231 // */32-bit 32 4MB 1 1024 (4KB) 232 // */mips(le) 31 4MB 1 512 (2KB) 233 234 // heapArenaBytes is the size of a heap arena. The heap 235 // consists of mappings of size heapArenaBytes, aligned to 236 // heapArenaBytes. The initial heap mapping is one arena. 237 // 238 // This is currently 64MB on 64-bit non-Windows and 4MB on 239 // 32-bit and on Windows. 
We use smaller arenas on Windows 240 // because all committed memory is charged to the process, 241 // even if it's not touched. Hence, for processes with small 242 // heaps, the mapped arena space needs to be commensurate. 243 // This is particularly important with the race detector, 244 // since it significantly amplifies the cost of committed 245 // memory. 246 heapArenaBytes = 1 << logHeapArenaBytes 247 248 heapArenaWords = heapArenaBytes / goarch.PtrSize 249 250 // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity, 251 // prefer using heapArenaBytes where possible (we need the 252 // constant to compute some other constants). 253 logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64 254 255 // heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs. 256 heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize) 257 258 pagesPerArena = heapArenaBytes / pageSize 259 260 // arenaL1Bits is the number of bits of the arena number 261 // covered by the first level arena map. 262 // 263 // This number should be small, since the first level arena 264 // map requires PtrSize*(1<<arenaL1Bits) of space in the 265 // binary's BSS. It can be zero, in which case the first level 266 // index is effectively unused. There is a performance benefit 267 // to this, since the generated code can be more efficient, 268 // but comes at the cost of having a large L2 mapping. 269 // 270 // We use the L1 map on 64-bit Windows because the arena size 271 // is small, but the address space is still 48 bits, and 272 // there's a high cost to having a large L2. 273 arenaL1Bits = 6 * (_64bit * goos.IsWindows) 274 275 // arenaL2Bits is the number of bits of the arena number 276 // covered by the second level arena index. 277 // 278 // The size of each arena map allocation is proportional to 279 // 1<<arenaL2Bits, so it's important that this not be too 280 // large. 48 bits leads to 32MB arena index allocations, which 281 // is about the practical threshold. 282 arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits 283 284 // arenaL1Shift is the number of bits to shift an arena frame 285 // number by to compute an index into the first level arena map. 286 arenaL1Shift = arenaL2Bits 287 288 // arenaBits is the total bits in a combined arena map index. 289 // This is split between the index into the L1 arena map and 290 // the L2 arena map. 291 arenaBits = arenaL1Bits + arenaL2Bits 292 293 // arenaBaseOffset is the pointer value that corresponds to 294 // index 0 in the heap arena map. 295 // 296 // On amd64, the address space is 48 bits, sign extended to 64 297 // bits. This offset lets us handle "negative" addresses (or 298 // high addresses if viewed as unsigned). 299 // 300 // On aix/ppc64, this offset allows to keep the heapAddrBits to 301 // 48. Otherwise, it would be 60 in order to handle mmap addresses 302 // (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this 303 // case, the memory reserved in (s *pageAlloc).init for chunks 304 // is causing important slowdowns. 305 // 306 // On other platforms, the user address space is contiguous 307 // and starts at 0, so no offset is necessary. 308 arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix 309 // A typed version of this constant that will make it into DWARF (for viewcore). 
310 arenaBaseOffsetUintptr = uintptr(arenaBaseOffset) 311 312 // Max number of threads to run garbage collection. 313 // 2, 3, and 4 are all plausible maximums depending 314 // on the hardware details of the machine. The garbage 315 // collector scales well to 32 cpus. 316 _MaxGcproc = 32 317 318 // minLegalPointer is the smallest possible legal pointer. 319 // This is the smallest possible architectural page size, 320 // since we assume that the first page is never mapped. 321 // 322 // This should agree with minZeroPage in the compiler. 323 minLegalPointer uintptr = 4096 324 325 // minHeapForMetadataHugePages sets a threshold on when certain kinds of 326 // heap metadata, currently the arenas map L2 entries and page alloc bitmap 327 // mappings, are allowed to be backed by huge pages. If the heap goal ever 328 // exceeds this threshold, then huge pages are enabled. 329 // 330 // These numbers are chosen with the assumption that huge pages are on the 331 // order of a few MiB in size. 332 // 333 // The kind of metadata this applies to has a very low overhead when compared 334 // to address space used, but their constant overheads for small heaps would 335 // be very high if they were to be backed by huge pages (e.g. a few MiB makes 336 // a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB 337 // heap). The benefit of huge pages is also not worth it for small heaps, 338 // because only a very, very small part of the metadata is used for small heaps. 339 // 340 // N.B. If the heap goal exceeds the threshold then shrinks to a very small size 341 // again, then huge pages will still be enabled for this mapping. The reason is that 342 // there's no point unless we're also returning the physical memory for these 343 // metadata mappings back to the OS. That would be quite complex to do in general 344 // as the heap is likely fragmented after a reduction in heap size. 345 minHeapForMetadataHugePages = 1 << 30 346 ) 347 348 // physPageSize is the size in bytes of the OS's physical pages. 349 // Mapping and unmapping operations must be done at multiples of 350 // physPageSize. 351 // 352 // This must be set by the OS init code (typically in osinit) before 353 // mallocinit. 354 var physPageSize uintptr 355 356 // physHugePageSize is the size in bytes of the OS's default physical huge 357 // page size whose allocation is opaque to the application. It is assumed 358 // and verified to be a power of two. 359 // 360 // If set, this must be set by the OS init code (typically in osinit) before 361 // mallocinit. However, setting it at all is optional, and leaving the default 362 // value is always safe (though potentially less efficient). 363 // 364 // Since physHugePageSize is always assumed to be a power of two, 365 // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift. 366 // The purpose of physHugePageShift is to avoid doing divisions in 367 // performance critical functions. 368 var ( 369 physHugePageSize uintptr 370 physHugePageShift uint 371 ) 372 373 func mallocinit() { 374 if class_to_size[_TinySizeClass] != _TinySize { 375 throw("bad TinySizeClass") 376 } 377 378 if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 { 379 // heapBits expects modular arithmetic on bitmap 380 // addresses to work. 381 throw("heapArenaBitmapWords not a power of 2") 382 } 383 384 // Check physPageSize. 385 if physPageSize == 0 { 386 // The OS init code failed to fetch the physical page size. 
387 throw("failed to get system page size") 388 } 389 if physPageSize > maxPhysPageSize { 390 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n") 391 throw("bad system page size") 392 } 393 if physPageSize < minPhysPageSize { 394 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n") 395 throw("bad system page size") 396 } 397 if physPageSize&(physPageSize-1) != 0 { 398 print("system page size (", physPageSize, ") must be a power of 2\n") 399 throw("bad system page size") 400 } 401 if physHugePageSize&(physHugePageSize-1) != 0 { 402 print("system huge page size (", physHugePageSize, ") must be a power of 2\n") 403 throw("bad system huge page size") 404 } 405 if physHugePageSize > maxPhysHugePageSize { 406 // physHugePageSize is greater than the maximum supported huge page size. 407 // Don't throw here, like in the other cases, since a system configured 408 // in this way isn't wrong, we just don't have the code to support them. 409 // Instead, silently set the huge page size to zero. 410 physHugePageSize = 0 411 } 412 if physHugePageSize != 0 { 413 // Since physHugePageSize is a power of 2, it suffices to increase 414 // physHugePageShift until 1<<physHugePageShift == physHugePageSize. 415 for 1<<physHugePageShift != physHugePageSize { 416 physHugePageShift++ 417 } 418 } 419 if pagesPerArena%pagesPerSpanRoot != 0 { 420 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n") 421 throw("bad pagesPerSpanRoot") 422 } 423 if pagesPerArena%pagesPerReclaimerChunk != 0 { 424 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n") 425 throw("bad pagesPerReclaimerChunk") 426 } 427 // Check that the minimum size (exclusive) for a malloc header is also 428 // a size class boundary. This is important to making sure checks align 429 // across different parts of the runtime. 430 // 431 // While we're here, also check to make sure all these size classes' 432 // span sizes are one page. Some code relies on this. 433 minSizeForMallocHeaderIsSizeClass := false 434 sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true 435 for i := 0; i < len(class_to_size); i++ { 436 if class_to_allocnpages[i] > 1 { 437 sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false 438 } 439 if minSizeForMallocHeader == uintptr(class_to_size[i]) { 440 minSizeForMallocHeaderIsSizeClass = true 441 break 442 } 443 } 444 if !minSizeForMallocHeaderIsSizeClass { 445 throw("min size of malloc header is not a size class boundary") 446 } 447 if !sizeClassesUpToMinSizeForMallocHeaderAreOnePage { 448 throw("expected all size classes up to min size for malloc header to fit in one-page spans") 449 } 450 // Check that the pointer bitmap for all small sizes without a malloc header 451 // fits in a word. 452 if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize { 453 throw("max pointer/scan bitmap size for headerless objects is too large") 454 } 455 456 if minTagBits > taggedPointerBits { 457 throw("taggedPointerBits too small") 458 } 459 460 // Initialize the heap. 
	mheap_.init()
	mcache0 = allocmcache()
	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
	lockInit(&profInsertLock, lockRankProfInsert)
	lockInit(&profBlockLock, lockRankProfBlock)
	lockInit(&profMemActiveLock, lockRankProfMemActive)
	for i := range profMemFutureLock {
		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
	}
	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)

	// Create initial arena growth hints.
	if isSbrkPlatform {
		// Don't generate hints on sbrk platforms. We can
		// only grow the break sequentially.
	} else if goarch.PtrSize == 8 {
		// On a 64-bit machine, we pick the following hints
		// because:
		//
		// 1. Starting from the middle of the address space
		//    makes it easier to grow out a contiguous range
		//    without running into some other mapping.
		//
		// 2. This makes Go heap addresses more easily
		//    recognizable when debugging.
		//
		// 3. Stack scanning in gccgo is still conservative,
		//    so it's important that addresses be distinguishable
		//    from other data.
		//
		// Starting at 0x00c0 means that the valid memory addresses
		// will begin with 0x00c0, 0x00c1, ...
		// In little-endian, that's c0 00, c1 00, ... None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On ios/arm64, the address space is even smaller.
		//
		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit processes.
		//
		// Space mapped for user arenas comes immediately after the range
		// originally reserved for the regular heap when race mode is not
		// enabled because user arena chunks can never be used for regular heap
		// allocations and we want to avoid fragmenting the address space.
		//
		// In race mode we have no choice but to just use the same hints because
		// the race detector requires that the heap be mapped contiguously.
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			// Switch to generating hints for user arenas if we've gone
			// through about half the hints.
In race mode, take only about 545 // a quarter; we don't have very much space to work with. 546 hintList := &mheap_.arenaHints 547 if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) { 548 hintList = &mheap_.userArena.arenaHints 549 } 550 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 551 hint.addr = p 552 hint.next, *hintList = *hintList, hint 553 } 554 } else { 555 // On a 32-bit machine, we're much more concerned 556 // about keeping the usable heap contiguous. 557 // Hence: 558 // 559 // 1. We reserve space for all heapArenas up front so 560 // they don't get interleaved with the heap. They're 561 // ~258MB, so this isn't too bad. (We could reserve a 562 // smaller amount of space up front if this is a 563 // problem.) 564 // 565 // 2. We hint the heap to start right above the end of 566 // the binary so we have the best chance of keeping it 567 // contiguous. 568 // 569 // 3. We try to stake out a reasonably large initial 570 // heap reservation. 571 572 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{}) 573 meta := uintptr(sysReserve(nil, arenaMetaSize)) 574 if meta != 0 { 575 mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true) 576 } 577 578 // We want to start the arena low, but if we're linked 579 // against C code, it's possible global constructors 580 // have called malloc and adjusted the process' brk. 581 // Query the brk so we can avoid trying to map the 582 // region over it (which will cause the kernel to put 583 // the region somewhere else, likely at a high 584 // address). 585 procBrk := sbrk0() 586 587 // If we ask for the end of the data segment but the 588 // operating system requires a little more space 589 // before we can start allocating, it will give out a 590 // slightly higher pointer. Except QEMU, which is 591 // buggy, as usual: it won't adjust the pointer 592 // upward. So adjust it upward a little bit ourselves: 593 // 1/4 MB to get away from the running binary image. 594 p := firstmoduledata.end 595 if p < procBrk { 596 p = procBrk 597 } 598 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end { 599 p = mheap_.heapArenaAlloc.end 600 } 601 p = alignUp(p+(256<<10), heapArenaBytes) 602 // Because we're worried about fragmentation on 603 // 32-bit, we try to make a large initial reservation. 604 arenaSizes := []uintptr{ 605 512 << 20, 606 256 << 20, 607 128 << 20, 608 } 609 for _, arenaSize := range arenaSizes { 610 a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes) 611 if a != nil { 612 mheap_.arena.init(uintptr(a), size, false) 613 p = mheap_.arena.end // For hint below 614 break 615 } 616 } 617 hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 618 hint.addr = p 619 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 620 621 // Place the hint for user arenas just after the large reservation. 622 // 623 // While this potentially competes with the hint above, in practice we probably 624 // aren't going to be getting this far anyway on 32-bit platforms. 625 userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc()) 626 userArenaHint.addr = p 627 userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint 628 } 629 // Initialize the memory limit here because the allocator is going to look at it 630 // but we haven't called gcinit yet and we're definitely going to allocate memory before then. 631 gcController.memoryLimit.Store(maxInt64) 632 } 633 634 // sysAlloc allocates heap arena space for at least n bytes. 
The 635 // returned pointer is always heapArenaBytes-aligned and backed by 636 // h.arenas metadata. The returned size is always a multiple of 637 // heapArenaBytes. sysAlloc returns nil on failure. 638 // There is no corresponding free function. 639 // 640 // hintList is a list of hint addresses for where to allocate new 641 // heap arenas. It must be non-nil. 642 // 643 // register indicates whether the heap arena should be registered 644 // in allArenas. 645 // 646 // sysAlloc returns a memory region in the Reserved state. This region must 647 // be transitioned to Prepared and then Ready before use. 648 // 649 // h must be locked. 650 func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) { 651 assertLockHeld(&h.lock) 652 653 n = alignUp(n, heapArenaBytes) 654 655 if hintList == &h.arenaHints { 656 // First, try the arena pre-reservation. 657 // Newly-used mappings are considered released. 658 // 659 // Only do this if we're using the regular heap arena hints. 660 // This behavior is only for the heap. 661 v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased) 662 if v != nil { 663 size = n 664 goto mapped 665 } 666 } 667 668 // Try to grow the heap at a hint address. 669 for *hintList != nil { 670 hint := *hintList 671 p := hint.addr 672 if hint.down { 673 p -= n 674 } 675 if p+n < p { 676 // We can't use this, so don't ask. 677 v = nil 678 } else if arenaIndex(p+n-1) >= 1<<arenaBits { 679 // Outside addressable heap. Can't use. 680 v = nil 681 } else { 682 v = sysReserve(unsafe.Pointer(p), n) 683 } 684 if p == uintptr(v) { 685 // Success. Update the hint. 686 if !hint.down { 687 p += n 688 } 689 hint.addr = p 690 size = n 691 break 692 } 693 // Failed. Discard this hint and try the next. 694 // 695 // TODO: This would be cleaner if sysReserve could be 696 // told to only return the requested address. In 697 // particular, this is already how Windows behaves, so 698 // it would simplify things there. 699 if v != nil { 700 sysFreeOS(v, n) 701 } 702 *hintList = hint.next 703 h.arenaHintAlloc.free(unsafe.Pointer(hint)) 704 } 705 706 if size == 0 { 707 if raceenabled { 708 // The race detector assumes the heap lives in 709 // [0x00c000000000, 0x00e000000000), but we 710 // just ran out of hints in this region. Give 711 // a nice failure. 712 throw("too many address space collisions for -race mode") 713 } 714 715 // All of the hints failed, so we'll take any 716 // (sufficiently aligned) address the kernel will give 717 // us. 718 v, size = sysReserveAligned(nil, n, heapArenaBytes) 719 if v == nil { 720 return nil, 0 721 } 722 723 // Create new hints for extending this region. 724 hint := (*arenaHint)(h.arenaHintAlloc.alloc()) 725 hint.addr, hint.down = uintptr(v), true 726 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 727 hint = (*arenaHint)(h.arenaHintAlloc.alloc()) 728 hint.addr = uintptr(v) + size 729 hint.next, mheap_.arenaHints = mheap_.arenaHints, hint 730 } 731 732 // Check for bad pointers or pointers we can't use. 733 { 734 var bad string 735 p := uintptr(v) 736 if p+size < p { 737 bad = "region exceeds uintptr range" 738 } else if arenaIndex(p) >= 1<<arenaBits { 739 bad = "base outside usable address space" 740 } else if arenaIndex(p+size-1) >= 1<<arenaBits { 741 bad = "end outside usable address space" 742 } 743 if bad != "" { 744 // This should be impossible on most architectures, 745 // but it would be really confusing to debug. 
746 print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n") 747 throw("memory reservation exceeds address space limit") 748 } 749 } 750 751 if uintptr(v)&(heapArenaBytes-1) != 0 { 752 throw("misrounded allocation in sysAlloc") 753 } 754 755 mapped: 756 // Create arena metadata. 757 for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ { 758 l2 := h.arenas[ri.l1()] 759 if l2 == nil { 760 // Allocate an L2 arena map. 761 // 762 // Use sysAllocOS instead of sysAlloc or persistentalloc because there's no 763 // statistic we can comfortably account for this space in. With this structure, 764 // we rely on demand paging to avoid large overheads, but tracking which memory 765 // is paged in is too expensive. Trying to account for the whole region means 766 // that it will appear like an enormous memory overhead in statistics, even though 767 // it is not. 768 l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2))) 769 if l2 == nil { 770 throw("out of memory allocating heap arena map") 771 } 772 if h.arenasHugePages { 773 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2)) 774 } else { 775 sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2)) 776 } 777 atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2)) 778 } 779 780 if l2[ri.l2()] != nil { 781 throw("arena already initialized") 782 } 783 var r *heapArena 784 r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys)) 785 if r == nil { 786 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys)) 787 if r == nil { 788 throw("out of memory allocating heap arena metadata") 789 } 790 } 791 792 // Register the arena in allArenas if requested. 793 if register { 794 if len(h.allArenas) == cap(h.allArenas) { 795 size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize 796 if size == 0 { 797 size = physPageSize 798 } 799 newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys)) 800 if newArray == nil { 801 throw("out of memory allocating allArenas") 802 } 803 oldSlice := h.allArenas 804 *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)} 805 copy(h.allArenas, oldSlice) 806 // Do not free the old backing array because 807 // there may be concurrent readers. Since we 808 // double the array each time, this can lead 809 // to at most 2x waste. 810 } 811 h.allArenas = h.allArenas[:len(h.allArenas)+1] 812 h.allArenas[len(h.allArenas)-1] = ri 813 } 814 815 // Store atomically just in case an object from the 816 // new heap arena becomes visible before the heap lock 817 // is released (which shouldn't happen, but there's 818 // little downside to this). 819 atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r)) 820 } 821 822 // Tell the race detector about the new heap memory. 823 if raceenabled { 824 racemapshadow(v, size) 825 } 826 827 return 828 } 829 830 // sysReserveAligned is like sysReserve, but the returned pointer is 831 // aligned to align bytes. It may reserve either n or n+align bytes, 832 // so it returns the size that was reserved. 
833 func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) { 834 if isSbrkPlatform { 835 if v != nil { 836 throw("unexpected heap arena hint on sbrk platform") 837 } 838 return sysReserveAlignedSbrk(size, align) 839 } 840 // Since the alignment is rather large in uses of this 841 // function, we're not likely to get it by chance, so we ask 842 // for a larger region and remove the parts we don't need. 843 retries := 0 844 retry: 845 p := uintptr(sysReserve(v, size+align)) 846 switch { 847 case p == 0: 848 return nil, 0 849 case p&(align-1) == 0: 850 return unsafe.Pointer(p), size + align 851 case GOOS == "windows": 852 // On Windows we can't release pieces of a 853 // reservation, so we release the whole thing and 854 // re-reserve the aligned sub-region. This may race, 855 // so we may have to try again. 856 sysFreeOS(unsafe.Pointer(p), size+align) 857 p = alignUp(p, align) 858 p2 := sysReserve(unsafe.Pointer(p), size) 859 if p != uintptr(p2) { 860 // Must have raced. Try again. 861 sysFreeOS(p2, size) 862 if retries++; retries == 100 { 863 throw("failed to allocate aligned heap memory; too many retries") 864 } 865 goto retry 866 } 867 // Success. 868 return p2, size 869 default: 870 // Trim off the unaligned parts. 871 pAligned := alignUp(p, align) 872 sysFreeOS(unsafe.Pointer(p), pAligned-p) 873 end := pAligned + size 874 endLen := (p + size + align) - end 875 if endLen > 0 { 876 sysFreeOS(unsafe.Pointer(end), endLen) 877 } 878 return unsafe.Pointer(pAligned), size 879 } 880 } 881 882 // enableMetadataHugePages enables huge pages for various sources of heap metadata. 883 // 884 // A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant 885 // time, but may take time proportional to the size of the mapped heap beyond that. 886 // 887 // This function is idempotent. 888 // 889 // The heap lock must not be held over this operation, since it will briefly acquire 890 // the heap lock. 891 // 892 // Must be called on the system stack because it acquires the heap lock. 893 // 894 //go:systemstack 895 func (h *mheap) enableMetadataHugePages() { 896 // Enable huge pages for page structure. 897 h.pages.enableChunkHugePages() 898 899 // Grab the lock and set arenasHugePages if it's not. 900 // 901 // Once arenasHugePages is set, all new L2 entries will be eligible for 902 // huge pages. We'll set all the old entries after we release the lock. 903 lock(&h.lock) 904 if h.arenasHugePages { 905 unlock(&h.lock) 906 return 907 } 908 h.arenasHugePages = true 909 unlock(&h.lock) 910 911 // N.B. The arenas L1 map is quite small on all platforms, so it's fine to 912 // just iterate over the whole thing. 913 for i := range h.arenas { 914 l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i]))) 915 if l2 == nil { 916 continue 917 } 918 sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2)) 919 } 920 } 921 922 // base address for all 0-byte allocations 923 var zerobase uintptr 924 925 // nextFreeFast returns the next free object if one is quickly available. 926 // Otherwise it returns 0. 927 func nextFreeFast(s *mspan) gclinkptr { 928 theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache? 
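	// allocCache is a 64-bit window of the span's allocation bitmap
	// starting at s.freeindex, with a 1 bit marking a free slot, so the
	// trailing-zero count gives the distance from freeindex to the next
	// free object (64 means the window has no free slots left).
	// For example, with illustrative values freeindex = 32 and
	// allocCache = ...1000 (binary), theBit is 3: the object at index 35
	// is handed out below, freeindex advances past it, and the cache is
	// shifted right by theBit+1 so the next search starts after it.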
929 if theBit < 64 { 930 result := s.freeindex + uint16(theBit) 931 if result < s.nelems { 932 freeidx := result + 1 933 if freeidx%64 == 0 && freeidx != s.nelems { 934 return 0 935 } 936 s.allocCache >>= uint(theBit + 1) 937 s.freeindex = freeidx 938 s.allocCount++ 939 return gclinkptr(uintptr(result)*s.elemsize + s.base()) 940 } 941 } 942 return 0 943 } 944 945 // nextFree returns the next free object from the cached span if one is available. 946 // Otherwise it refills the cache with a span with an available object and 947 // returns that object along with a flag indicating that this was a heavy 948 // weight allocation. If it is a heavy weight allocation the caller must 949 // determine whether a new GC cycle needs to be started or if the GC is active 950 // whether this goroutine needs to assist the GC. 951 // 952 // Must run in a non-preemptible context since otherwise the owner of 953 // c could change. 954 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) { 955 s = c.alloc[spc] 956 checkGCTrigger = false 957 freeIndex := s.nextFreeIndex() 958 if freeIndex == s.nelems { 959 // The span is full. 960 if s.allocCount != s.nelems { 961 println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 962 throw("s.allocCount != s.nelems && freeIndex == s.nelems") 963 } 964 c.refill(spc) 965 checkGCTrigger = true 966 s = c.alloc[spc] 967 968 freeIndex = s.nextFreeIndex() 969 } 970 971 if freeIndex >= s.nelems { 972 throw("freeIndex is not valid") 973 } 974 975 v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base()) 976 s.allocCount++ 977 if s.allocCount > s.nelems { 978 println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 979 throw("s.allocCount > s.nelems") 980 } 981 return 982 } 983 984 // doubleCheckMalloc enables a bunch of extra checks to malloc to double-check 985 // that various invariants are upheld. 986 // 987 // We might consider turning these on by default; many of them previously were. 988 // They account for a few % of mallocgc's cost though, which does matter somewhat 989 // at scale. 990 const doubleCheckMalloc = false 991 992 // Allocate an object of size bytes. 993 // Small objects are allocated from the per-P cache's free lists. 994 // Large objects (> 32 kB) are allocated straight from the heap. 995 // 996 // mallocgc should be an internal detail, 997 // but widely used packages access it using linkname. 998 // Notable members of the hall of shame include: 999 // - github.com/bytedance/gopkg 1000 // - github.com/bytedance/sonic 1001 // - github.com/cloudwego/frugal 1002 // - github.com/cockroachdb/cockroach 1003 // - github.com/cockroachdb/pebble 1004 // - github.com/ugorji/go/codec 1005 // 1006 // Do not remove or change the type signature. 1007 // See go.dev/issue/67401. 1008 // 1009 //go:linkname mallocgc 1010 func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { 1011 if doubleCheckMalloc { 1012 if gcphase == _GCmarktermination { 1013 throw("mallocgc called with gcphase == _GCmarktermination") 1014 } 1015 } 1016 1017 // Short-circuit zero-sized allocation requests. 1018 if size == 0 { 1019 return unsafe.Pointer(&zerobase) 1020 } 1021 1022 // It's possible for any malloc to trigger sweeping, which may in 1023 // turn queue finalizers. Record this dynamic lock edge. 1024 // N.B. Compiled away if lockrank experiment is not enabled. 1025 lockRankMayQueueFinalizer() 1026 1027 // Pre-malloc debug hooks. 
1028 if debug.malloc { 1029 if x := preMallocgcDebug(size, typ); x != nil { 1030 return x 1031 } 1032 } 1033 1034 // For ASAN, we allocate extra memory around each allocation called the "redzone." 1035 // These "redzones" are marked as unaddressable. 1036 var asanRZ uintptr 1037 if asanenabled { 1038 asanRZ = redZoneSize(size) 1039 size += asanRZ 1040 } 1041 1042 // Assist the GC if needed. 1043 if gcBlackenEnabled != 0 { 1044 deductAssistCredit(size) 1045 } 1046 1047 // Actually do the allocation. 1048 var x unsafe.Pointer 1049 var elemsize uintptr 1050 if size <= maxSmallSize-mallocHeaderSize { 1051 if typ == nil || !typ.Pointers() { 1052 if size < maxTinySize { 1053 x, elemsize = mallocgcTiny(size, typ, needzero) 1054 } else { 1055 x, elemsize = mallocgcSmallNoscan(size, typ, needzero) 1056 } 1057 } else if heapBitsInSpan(size) { 1058 x, elemsize = mallocgcSmallScanNoHeader(size, typ, needzero) 1059 } else { 1060 x, elemsize = mallocgcSmallScanHeader(size, typ, needzero) 1061 } 1062 } else { 1063 x, elemsize = mallocgcLarge(size, typ, needzero) 1064 } 1065 1066 // Notify sanitizers, if enabled. 1067 if raceenabled { 1068 racemalloc(x, size-asanRZ) 1069 } 1070 if msanenabled { 1071 msanmalloc(x, size-asanRZ) 1072 } 1073 if asanenabled { 1074 // Poison the space between the end of the requested size of x 1075 // and the end of the slot. Unpoison the requested allocation. 1076 frag := elemsize - size 1077 if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-mallocHeaderSize { 1078 frag -= mallocHeaderSize 1079 } 1080 asanpoison(unsafe.Add(x, size-asanRZ), asanRZ) 1081 asanunpoison(x, size-asanRZ) 1082 } 1083 1084 // Adjust our GC assist debt to account for internal fragmentation. 1085 if gcBlackenEnabled != 0 && elemsize != 0 { 1086 if assistG := getg().m.curg; assistG != nil { 1087 assistG.gcAssistBytes -= int64(elemsize - size) 1088 } 1089 } 1090 1091 // Post-malloc debug hooks. 1092 if debug.malloc { 1093 postMallocgcDebug(x, elemsize, typ) 1094 } 1095 return x 1096 } 1097 1098 func mallocgcTiny(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 1099 // Set mp.mallocing to keep from being preempted by GC. 1100 mp := acquirem() 1101 if doubleCheckMalloc { 1102 if mp.mallocing != 0 { 1103 throw("malloc deadlock") 1104 } 1105 if mp.gsignal == getg() { 1106 throw("malloc during signal") 1107 } 1108 if typ != nil && typ.Pointers() { 1109 throw("expected noscan for tiny alloc") 1110 } 1111 } 1112 mp.mallocing = 1 1113 1114 // Tiny allocator. 1115 // 1116 // Tiny allocator combines several tiny allocation requests 1117 // into a single memory block. The resulting memory block 1118 // is freed when all subobjects are unreachable. The subobjects 1119 // must be noscan (don't have pointers), this ensures that 1120 // the amount of potentially wasted memory is bounded. 1121 // 1122 // Size of the memory block used for combining (maxTinySize) is tunable. 1123 // Current setting is 16 bytes, which relates to 2x worst case memory 1124 // wastage (when all but one subobjects are unreachable). 1125 // 8 bytes would result in no wastage at all, but provides less 1126 // opportunities for combining. 1127 // 32 bytes provides more opportunities for combining, 1128 // but can lead to 4x worst case wastage. 1129 // The best case winning is 8x regardless of block size. 1130 // 1131 // Objects obtained from tiny allocator must not be freed explicitly. 1132 // So when an object will be freed explicitly, we ensure that 1133 // its size >= maxTinySize. 
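	//
	// For example (an illustrative sequence): an 8-byte noscan
	// allocation followed by a 4-byte one can share a single 16-byte
	// tiny block. The first is placed at offset 0 and sets tinyoffset
	// to 8; the second is already 4-byte aligned at offset 8 and fits
	// (8+4 <= 16), so both come out of the same block, which is freed
	// only once neither object is reachable.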
	//
	// SetFinalizer has a special case for objects potentially coming
	// from the tiny allocator; in such a case it allows setting finalizers
	// for an inner byte of a memory block.
	//
	// The main targets of the tiny allocator are small strings and
	// standalone escaping variables. On a json benchmark
	// the allocator reduces the number of allocations by ~12% and
	// reduces heap size by ~20%.
	c := getMCache(mp)
	off := c.tinyoffset
	// Align tiny pointer for required (conservative) alignment.
	if size&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && size == 12 {
		// Conservatively align 12-byte objects to 8 bytes on 32-bit
		// systems so that objects whose first field is a 64-bit
		// value are aligned to 8 bytes and do not cause a fault on
		// atomic access. See issue 37262.
		// TODO(mknyszek): Remove this workaround if/when issue 36606
		// is resolved.
		off = alignUp(off, 8)
	} else if size&3 == 0 {
		off = alignUp(off, 4)
	} else if size&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+size <= maxTinySize && c.tiny != 0 {
		// The object fits into existing tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + size
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		return x, 0
	}
	// Allocate a new maxTinySize block.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	(*[2]uint64)(x)[0] = 0
	(*[2]uint64)(x)[1] = 0
	// See if we need to replace the existing tiny block with the new one
	// based on amount of remaining free space.
	if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
		// Note: disabled when race detector is on, see comment near end of this function.
		c.tiny = uintptr(x)
		c.tinyoffset = size
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()
	// As x and the heap bits are initialized, update
	// freeIndexForScan now so x is seen by the GC
	// (including conservative scan) as an allocated object.
	// While this pointer can't escape into user code as a
	// _live_ pointer until we return, conservative scanning
	// may find a dead pointer that happens to point into this
	// object. Delaying this update until now ensures that
	// conservative scanning considers this pointer dead until
	// this point.
	span.freeIndexForScan = span.freeindex

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something.
The code below just pretends it is 1221 // internal fragmentation and matches the GC's accounting by 1222 // using the whole allocation slot. 1223 c.nextSample -= int64(span.elemsize) 1224 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1225 profilealloc(mp, x, span.elemsize) 1226 } 1227 mp.mallocing = 0 1228 releasem(mp) 1229 1230 if checkGCTrigger { 1231 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1232 gcStart(t) 1233 } 1234 } 1235 1236 if raceenabled { 1237 // Pad tinysize allocations so they are aligned with the end 1238 // of the tinyalloc region. This ensures that any arithmetic 1239 // that goes off the top end of the object will be detectable 1240 // by checkptr (issue 38872). 1241 // Note that we disable tinyalloc when raceenabled for this to work. 1242 // TODO: This padding is only performed when the race detector 1243 // is enabled. It would be nice to enable it if any package 1244 // was compiled with checkptr, but there's no easy way to 1245 // detect that (especially at compile time). 1246 // TODO: enable this padding for all allocations, not just 1247 // tinyalloc ones. It's tricky because of pointer maps. 1248 // Maybe just all noscan objects? 1249 x = add(x, span.elemsize-size) 1250 } 1251 return x, span.elemsize 1252 } 1253 1254 func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 1255 // Set mp.mallocing to keep from being preempted by GC. 1256 mp := acquirem() 1257 if doubleCheckMalloc { 1258 if mp.mallocing != 0 { 1259 throw("malloc deadlock") 1260 } 1261 if mp.gsignal == getg() { 1262 throw("malloc during signal") 1263 } 1264 if typ != nil && typ.Pointers() { 1265 throw("expected noscan type for noscan alloc") 1266 } 1267 } 1268 mp.mallocing = 1 1269 1270 checkGCTrigger := false 1271 c := getMCache(mp) 1272 var sizeclass uint8 1273 if size <= smallSizeMax-8 { 1274 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)] 1275 } else { 1276 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)] 1277 } 1278 size = uintptr(class_to_size[sizeclass]) 1279 spc := makeSpanClass(sizeclass, true) 1280 span := c.alloc[spc] 1281 v := nextFreeFast(span) 1282 if v == 0 { 1283 v, span, checkGCTrigger = c.nextFree(spc) 1284 } 1285 x := unsafe.Pointer(v) 1286 if needzero && span.needzero != 0 { 1287 memclrNoHeapPointers(x, size) 1288 } 1289 1290 // Ensure that the stores above that initialize x to 1291 // type-safe memory and set the heap bits occur before 1292 // the caller can make x observable to the garbage 1293 // collector. Otherwise, on weakly ordered machines, 1294 // the garbage collector could follow a pointer to x, 1295 // but see uninitialized memory or stale heap bits. 1296 publicationBarrier() 1297 // As x and the heap bits are initialized, update 1298 // freeIndexForScan now so x is seen by the GC 1299 // (including conservative scan) as an allocated object. 1300 // While this pointer can't escape into user code as a 1301 // _live_ pointer until we return, conservative scanning 1302 // may find a dead pointer that happens to point into this 1303 // object. Delaying this update until now ensures that 1304 // conservative scanning considers this pointer dead until 1305 // this point. 1306 span.freeIndexForScan = span.freeindex 1307 1308 // Allocate black during GC. 1309 // All slots hold nil so no scanning is needed. 1310 // This may be racing with GC so do it atomically if there can be 1311 // a race marking the bit. 
1312 if writeBarrier.enabled { 1313 gcmarknewobject(span, uintptr(x)) 1314 } 1315 1316 // Note cache c only valid while m acquired; see #47302 1317 // 1318 // N.B. Use the full size because that matches how the GC 1319 // will update the mem profile on the "free" side. 1320 // 1321 // TODO(mknyszek): We should really count the header as part 1322 // of gc_sys or something. The code below just pretends it is 1323 // internal fragmentation and matches the GC's accounting by 1324 // using the whole allocation slot. 1325 c.nextSample -= int64(size) 1326 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1327 profilealloc(mp, x, size) 1328 } 1329 mp.mallocing = 0 1330 releasem(mp) 1331 1332 if checkGCTrigger { 1333 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1334 gcStart(t) 1335 } 1336 } 1337 return x, size 1338 } 1339 1340 func mallocgcSmallScanNoHeader(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 1341 // Set mp.mallocing to keep from being preempted by GC. 1342 mp := acquirem() 1343 if doubleCheckMalloc { 1344 if mp.mallocing != 0 { 1345 throw("malloc deadlock") 1346 } 1347 if mp.gsignal == getg() { 1348 throw("malloc during signal") 1349 } 1350 if typ == nil || !typ.Pointers() { 1351 throw("noscan allocated in scan-only path") 1352 } 1353 if !heapBitsInSpan(size) { 1354 throw("heap bits in not in span for non-header-only path") 1355 } 1356 } 1357 mp.mallocing = 1 1358 1359 checkGCTrigger := false 1360 c := getMCache(mp) 1361 sizeclass := size_to_class8[divRoundUp(size, smallSizeDiv)] 1362 spc := makeSpanClass(sizeclass, false) 1363 span := c.alloc[spc] 1364 v := nextFreeFast(span) 1365 if v == 0 { 1366 v, span, checkGCTrigger = c.nextFree(spc) 1367 } 1368 x := unsafe.Pointer(v) 1369 if needzero && span.needzero != 0 { 1370 memclrNoHeapPointers(x, size) 1371 } 1372 if goarch.PtrSize == 8 && sizeclass == 1 { 1373 // initHeapBits already set the pointer bits for the 8-byte sizeclass 1374 // on 64-bit platforms. 1375 c.scanAlloc += 8 1376 } else { 1377 c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span) 1378 } 1379 size = uintptr(class_to_size[sizeclass]) 1380 1381 // Ensure that the stores above that initialize x to 1382 // type-safe memory and set the heap bits occur before 1383 // the caller can make x observable to the garbage 1384 // collector. Otherwise, on weakly ordered machines, 1385 // the garbage collector could follow a pointer to x, 1386 // but see uninitialized memory or stale heap bits. 1387 publicationBarrier() 1388 // As x and the heap bits are initialized, update 1389 // freeIndexForScan now so x is seen by the GC 1390 // (including conservative scan) as an allocated object. 1391 // While this pointer can't escape into user code as a 1392 // _live_ pointer until we return, conservative scanning 1393 // may find a dead pointer that happens to point into this 1394 // object. Delaying this update until now ensures that 1395 // conservative scanning considers this pointer dead until 1396 // this point. 1397 span.freeIndexForScan = span.freeindex 1398 1399 // Allocate black during GC. 1400 // All slots hold nil so no scanning is needed. 1401 // This may be racing with GC so do it atomically if there can be 1402 // a race marking the bit. 1403 if writeBarrier.enabled { 1404 gcmarknewobject(span, uintptr(x)) 1405 } 1406 1407 // Note cache c only valid while m acquired; see #47302 1408 // 1409 // N.B. Use the full size because that matches how the GC 1410 // will update the mem profile on the "free" side. 
1411 // 1412 // TODO(mknyszek): We should really count the header as part 1413 // of gc_sys or something. The code below just pretends it is 1414 // internal fragmentation and matches the GC's accounting by 1415 // using the whole allocation slot. 1416 c.nextSample -= int64(size) 1417 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1418 profilealloc(mp, x, size) 1419 } 1420 mp.mallocing = 0 1421 releasem(mp) 1422 1423 if checkGCTrigger { 1424 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1425 gcStart(t) 1426 } 1427 } 1428 return x, size 1429 } 1430 1431 func mallocgcSmallScanHeader(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 1432 // Set mp.mallocing to keep from being preempted by GC. 1433 mp := acquirem() 1434 if doubleCheckMalloc { 1435 if mp.mallocing != 0 { 1436 throw("malloc deadlock") 1437 } 1438 if mp.gsignal == getg() { 1439 throw("malloc during signal") 1440 } 1441 if typ == nil || !typ.Pointers() { 1442 throw("noscan allocated in scan-only path") 1443 } 1444 if heapBitsInSpan(size) { 1445 throw("heap bits in span for header-only path") 1446 } 1447 } 1448 mp.mallocing = 1 1449 1450 checkGCTrigger := false 1451 c := getMCache(mp) 1452 size += mallocHeaderSize 1453 var sizeclass uint8 1454 if size <= smallSizeMax-8 { 1455 sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)] 1456 } else { 1457 sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)] 1458 } 1459 size = uintptr(class_to_size[sizeclass]) 1460 spc := makeSpanClass(sizeclass, false) 1461 span := c.alloc[spc] 1462 v := nextFreeFast(span) 1463 if v == 0 { 1464 v, span, checkGCTrigger = c.nextFree(spc) 1465 } 1466 x := unsafe.Pointer(v) 1467 if needzero && span.needzero != 0 { 1468 memclrNoHeapPointers(x, size) 1469 } 1470 header := (**_type)(x) 1471 x = add(x, mallocHeaderSize) 1472 c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-mallocHeaderSize, typ, header, span) 1473 1474 // Ensure that the stores above that initialize x to 1475 // type-safe memory and set the heap bits occur before 1476 // the caller can make x observable to the garbage 1477 // collector. Otherwise, on weakly ordered machines, 1478 // the garbage collector could follow a pointer to x, 1479 // but see uninitialized memory or stale heap bits. 1480 publicationBarrier() 1481 // As x and the heap bits are initialized, update 1482 // freeIndexForScan now so x is seen by the GC 1483 // (including conservative scan) as an allocated object. 1484 // While this pointer can't escape into user code as a 1485 // _live_ pointer until we return, conservative scanning 1486 // may find a dead pointer that happens to point into this 1487 // object. Delaying this update until now ensures that 1488 // conservative scanning considers this pointer dead until 1489 // this point. 1490 span.freeIndexForScan = span.freeindex 1491 1492 // Allocate black during GC. 1493 // All slots hold nil so no scanning is needed. 1494 // This may be racing with GC so do it atomically if there can be 1495 // a race marking the bit. 1496 if writeBarrier.enabled { 1497 gcmarknewobject(span, uintptr(x)) 1498 } 1499 1500 // Note cache c only valid while m acquired; see #47302 1501 // 1502 // N.B. Use the full size because that matches how the GC 1503 // will update the mem profile on the "free" side. 1504 // 1505 // TODO(mknyszek): We should really count the header as part 1506 // of gc_sys or something. 

func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
	}
	mp.mallocing = 1

	c := getMCache(mp)
	// For large allocations, keep track of zeroed state so that
	// bulk zeroing can happen later in a preemptible context.
	span := c.allocLarge(size, typ == nil || !typ.Pointers())
	span.freeindex = 1
	span.allocCount = 1
	span.largeType = nil // Tell the GC not to look at this yet.
	size = span.elemsize
	x := unsafe.Pointer(span.base())

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()
	// As x and the heap bits are initialized, update
	// freeIndexForScan now so x is seen by the GC
	// (including conservative scan) as an allocated object.
	// While this pointer can't escape into user code as a
	// _live_ pointer until we return, conservative scanning
	// may find a dead pointer that happens to point into this
	// object. Delaying this update until now ensures that
	// conservative scanning considers this pointer dead until
	// this point.
	span.freeIndexForScan = span.freeindex

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	// Check to see if we need to trigger the GC.
	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
		gcStart(t)
	}

	// Objects can be zeroed late in a context where preemption can occur.
	// If the object contains pointers, its pointer data must be cleared,
	// or the object must otherwise be marked so that the GC doesn't scan it.
	// x will keep the memory alive.
	if noscan := typ == nil || !typ.Pointers(); !noscan || (needzero && span.needzero != 0) {
		// N.B. size is the full slot size (span.elemsize) in this case.
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302

		// Finish storing the type information for this case.
		mp := acquirem()
		if !noscan {
			getMCache(mp).scanAlloc += heapSetTypeLarge(uintptr(x), size, typ, span)
		}
		// Publish the object with the now-zeroed memory.
		publicationBarrier()
		releasem(mp)
	}
	return x, size
}
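
// For large pointer-bearing objects, publication is effectively two-phase.
// This is only a condensed view of the sequence above, not additional
// machinery:
//
//	span.largeType = nil             // GC treats the object as opaque for now
//	publicationBarrier()             // object is allocated, but not yet typed
//	memclrNoHeapPointersChunked(...) // preemptible; the GC may run here
//	heapSetTypeLarge(...)            // install the pointer/type metadata
//	publicationBarrier()             // now the GC may scan the object
//
// so a GC that runs during the chunked clear never scans half-zeroed memory.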

func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer {
	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			// TODO(austin): This should be just
			//	align = uintptr(typ.align)
			// but that's only 4 on 32-bit platforms,
			// even if there's a uint64 field in typ (see #599).
			// This causes 64-bit atomic accesses to panic.
			// Hence, we use stricter alignment that matches
			// the normal allocator better.
			if size&7 == 0 {
				align = 8
			} else if size&3 == 0 {
				align = 4
			} else if size&1 == 0 {
				align = 2
			} else {
				align = 1
			}
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}
	if inittrace.active && inittrace.id == getg().goid {
		// Init functions are executed sequentially in a single goroutine.
		inittrace.allocs += 1
	}
	return nil
}

func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) {
	if inittrace.active && inittrace.id == getg().goid {
		// Init functions are executed sequentially in a single goroutine.
		inittrace.bytes += uint64(elemsize)
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.HeapObjectAlloc(uintptr(x), typ)
			traceRelease(trace)
		}
	}
}

// deductAssistCredit reduces the current G's assist credit
// by size bytes, and assists the GC if necessary.
//
// Caller must be preemptible.
func deductAssistCredit(size uintptr) {
	// Charge the current user G for this allocation.
	assistG := getg()
	if assistG.m.curg != nil {
		assistG = assistG.m.curg
	}
	// Charge the allocation against the G. We'll account
	// for internal fragmentation at the end of mallocgc.
	assistG.gcAssistBytes -= int64(size)

	if assistG.gcAssistBytes < 0 {
		// This G is in debt. Assist the GC to correct
		// this before allocating. This must happen
		// before disabling preemption.
		gcAssistAlloc(assistG)
	}
}

// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
// on chunks of the buffer to be zeroed, with opportunities for preemption
// along the way. memclrNoHeapPointers contains no safepoints and also
// cannot be preemptively scheduled, so this provides a still-efficient
// bulk clear that can also be preempted at a reasonable granularity.
//
// Use this with care; if the data being cleared is tagged to contain
// pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
	v := uintptr(x)
	// The chunk size comes from benchmarking: 128k is too small, 512k is too large.
	const chunkBytes = 256 * 1024
	vsize := v + size
	for voff := v; voff < vsize; voff = voff + chunkBytes {
		if getg().preempt {
			// may hold locks, e.g., profiling
			goschedguarded()
		}
		// clear min(avail, chunkBytes) bytes
		n := vsize - voff
		if n > chunkBytes {
			n = chunkBytes
		}
		memclrNoHeapPointers(unsafe.Pointer(voff), n)
	}
}
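
// For example, clearing a 1MiB object proceeds in four 256KiB chunks, with a
// preemption check before each memclrNoHeapPointers call:
//
//	chunk 0: [x+0,      x+256KiB)
//	chunk 1: [x+256KiB, x+512KiB)
//	chunk 2: [x+512KiB, x+768KiB)
//	chunk 3: [x+768KiB, x+1MiB)
//
// A final short chunk handles sizes that are not a multiple of chunkBytes.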

// newobject implements the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname maps_newobject internal/runtime/maps.newobject
func maps_newobject(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// reflect_unsafe_New is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

// newarray allocates an array of n elements of type typ.
//
// newarray should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname newarray
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.Size_, typ, true)
	}
	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

// reflect_unsafe_NewArray is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/bytedance/sonic
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/segmentio/encoding
//   - github.com/segmentio/kafka-go
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

//go:linkname maps_newarray internal/runtime/maps.newarray
func maps_newarray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}
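
// The overflow guard in newarray above rejects element-count/size
// combinations whose product wraps around or exceeds maxAlloc. For
// illustration only (values assume a 64-bit platform):
//
//	typ.Size_ = 24,    n = 1000  -> mem = 24000, allocated normally
//	typ.Size_ = 1<<32, n = 1<<32 -> MulUintptr reports overflow, panic
//	n = -1                       -> rejected by the n < 0 check, panic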

// profilealloc resets the current mcache's nextSample counter and
// records a memory profile sample.
//
// The caller must be non-preemptible and have a P.
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache(mp)
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.memProfRate = MemProfileRate
	c.nextSample = nextSample()
	mProf_Malloc(mp, x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with a mean inter-sample distance of
// MemProfileRate bytes. In a Poisson process, the distance between two
// samples follows an exponential distribution, so the best return value is a
// random number drawn from an exponential distribution whose mean is
// MemProfileRate.
func nextSample() int64 {
	if MemProfileRate == 0 {
		// Basically never sample.
		return maxInt64
	}
	if MemProfileRate == 1 {
		// Sample immediately.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if gp := getg(); gp == gp.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return int64(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution with mean `mean`.
	// The probability density is (1/mean)*exp(-x/mean), so the CDF is
	// p = 1 - exp(-x/mean), so
	//   q = 1 - p == exp(-x/mean)
	//   log_e(q) = -x/mean
	//   x = -log_e(q) * mean
	//   x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := cheaprandn(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int64 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int64(cheaprandn(uint32(2 * rate)))
	}
	return 0
}
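
// As a worked example of the math in fastexprand: with the default
// MemProfileRate of 512 KiB, q is drawn uniformly from [1, 1<<26], so
// qlog = log_2(q) - 26 = log_2(q/2^26) lies in (-26, 0], and the returned
// value is approximately -ln(q/2^26) * 512KiB. Half the time q/2^26 > 1/2,
// giving a step below ln(2)*512KiB (about 355KiB), while the mean over many
// samples works out to 512KiB, as intended.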

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap

// persistentalloc is a wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// It is intended for things like function/type/debug-related persistent data.
// If align is 0, the default alignment (currently 8) is used.
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
// Consider marking persistentalloc'd types as not in heap by embedding
// internal/runtime/sys.NotInHeap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}
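
// A typical persistentalloc call site looks like the following sketch. The
// debugMeta type is made up for illustration; real callers include fixalloc
// (which grows its chunks this way) and preMallocgcDebug above:
//
//	meta := (*debugMeta)(persistentalloc(unsafe.Sizeof(debugMeta{}), goarch.PtrSize, &memstats.other_sys))
//
// The memory is never freed and is accounted under the supplied sysMemStat,
// so it should be reserved for data that lives for the rest of the process.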

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			n := pEnd - l.mapped
			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
			sysUsed(unsafe.Pointer(l.mapped), n, n)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types which embed
// internal/runtime/sys.NotInHeap, but this serves as a generic type
// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
type notInHeap struct{ _ sys.NotInHeap }

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}

// redZoneSize computes the size of the redzone for a given allocation.
// Refer to the implementation in compiler-rt.
func redZoneSize(userSize uintptr) uintptr {
	switch {
	case userSize <= (64 - 16):
		return 16 << 0
	case userSize <= (128 - 32):
		return 16 << 1
	case userSize <= (512 - 64):
		return 16 << 2
	case userSize <= (4096 - 128):
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}
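
// For example, redZoneSize(24) = 16 (24 <= 64-16), redZoneSize(100) = 64
// (100 is above 128-32 but within 512-64), and anything above (1<<16)-1024
// gets the maximum 2048-byte redzone. The thresholds mirror compiler-rt's
// ASan redzone scaling, as noted in the doc comment above.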