Source file src/runtime/mem_sbrk.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build plan9 || wasm

package runtime

import "unsafe"

const isSbrkPlatform = true

const memDebug = false

// Memory management on sbrk systems (including the linear memory
// on Wasm).

// bloc is the runtime's sense of the break, which can go up or
// down. blocMax is the system's break, also the high water mark
// of bloc. The runtime uses memory up to bloc. The memory
// between bloc and blocMax is allocated by the OS but not used
// by the runtime.
//
// When the runtime needs to grow the heap address range, it
// increases bloc. When it needs to grow beyond blocMax, it calls
// the system sbrk to allocate more memory (and therefore
// increase blocMax).
//
// When the runtime frees memory at the end of the address space,
// it decreases bloc, but does not reduce the system break (as
// the OS doesn't support it). When the runtime frees memory in
// the middle of the address space, the memory goes to a free
// list.
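//
// For example (an illustrative sketch with made-up addresses): if
// bloc is 0x1000 and blocMax is 0x3000, growing the heap by 0x1000
// bytes only advances bloc to 0x2000, since the OS has already
// provided that memory. Growing by a further 0x2000 bytes would push
// bloc past blocMax, so the runtime calls sbrk to raise the system
// break (and blocMax) to 0x4000.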

var bloc uintptr    // The runtime's sense of break. Can go up or down.
var blocMax uintptr // The break of the OS. Only increases.
var memlock mutex

type memHdr struct {
	next memHdrPtr
	size uintptr
}

var memFreelist memHdrPtr // sorted in ascending order

type memHdrPtr uintptr

func (p memHdrPtr) ptr() *memHdr   { return (*memHdr)(unsafe.Pointer(p)) }
func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
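
// The free list is intrusive: when a block is freed, memFree writes a
// memHdr into the first bytes of the freed memory itself, so list
// bookkeeping costs no memory beyond the blocks it tracks. Everything
// in a free block past its header is kept zeroed; memCheck relies on
// that invariant.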

// memAlloc allocates n bytes from the brk reservation, or if it's full,
// the system.
//
// memlock must be held.
//
// memAlloc must be called on the system stack, otherwise a stack growth
// could cause us to call back into it. Since memlock is held, that could
// lead to a self-deadlock.
//
//go:systemstack
func memAlloc(n uintptr) unsafe.Pointer {
	if p := memAllocNoGrow(n); p != nil {
		return p
	}
	return sbrk(n)
}
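
// An illustrative call pattern (a sketch mirroring sysAllocOS below,
// not an additional runtime API):
//
//	systemstack(func() {
//		lock(&memlock)
//		p := memAlloc(n)
//		memCheck()
//		unlock(&memlock)
//	})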

// memAllocNoGrow attempts to allocate n bytes from the existing brk.
//
// memlock must be held.
//
// memAllocNoGrow must be called on the system stack, otherwise a stack
// growth could cause us to call back into it. Since memlock is held,
// that could lead to a self-deadlock.
//
//go:systemstack
func memAllocNoGrow(n uintptr) unsafe.Pointer {
	n = memRound(n)
	var prevp *memHdr
	// First fit: scan the sorted free list for a block of at least n bytes.
	for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
		if p.size >= n {
			if p.size == n {
				// Exact fit: unlink the block.
				if prevp != nil {
					prevp.next = p.next
				} else {
					memFreelist = p.next
				}
			} else {
				// Larger block: carve n bytes off its tail so that
				// no list links need to change.
				p.size -= n
				p = (*memHdr)(add(unsafe.Pointer(p), p.size))
			}
			// Clear any stale header bytes before returning the block.
			*p = memHdr{}
			return unsafe.Pointer(p)
		}
		prevp = p
	}
	return nil
}

// memFree makes [ap, ap+n) available for reallocation by memAlloc.
//
// memlock must be held.
//
// memFree must be called on the system stack, otherwise a stack growth
// could cause us to call back into it. Since memlock is held, that
// could lead to a self-deadlock.
//
//go:systemstack
func memFree(ap unsafe.Pointer, n uintptr) {
	n = memRound(n)
	memclrNoHeapPointers(ap, n)
	bp := (*memHdr)(ap)
	bp.size = n
	bpn := uintptr(ap)
	if memFreelist == 0 {
		bp.next = 0
		memFreelist.set(bp)
		return
	}
	p := memFreelist.ptr()
	if bpn < uintptr(unsafe.Pointer(p)) {
		// New block goes at the head of the list.
		memFreelist.set(bp)
		if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
			// Coalesce with the old head.
			bp.size += p.size
			bp.next = p.next
			*p = memHdr{}
		} else {
			bp.next.set(p)
		}
		return
	}
	// Find the block after which the new block belongs.
	for ; p.next != 0; p = p.next.ptr() {
		if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
			break
		}
	}
	// Coalesce with the following block if they abut.
	if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
		bp.size += p.next.ptr().size
		bp.next = p.next.ptr().next
		*p.next.ptr() = memHdr{}
	} else {
		bp.next = p.next
	}
	// Coalesce with the preceding block if they abut.
	if uintptr(unsafe.Pointer(p))+p.size == bpn {
		p.size += bp.size
		p.next = bp.next
		*bp = memHdr{}
	} else {
		p.next.set(bp)
	}
}
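
// A worked example (illustrative addresses, 0x1000-byte pages): with a
// free list of [0x2000, 0x3000) -> [0x5000, 0x6000), freeing
// [0x3000, 0x5000) coalesces with both neighbors, leaving the single
// free block [0x2000, 0x6000).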

// memCheck checks invariants around free list management.
//
// memlock must be held.
//
// memCheck must be called on the system stack, otherwise a stack growth
// could cause us to call back into it. Since memlock is held, that
// could lead to a self-deadlock.
//
//go:systemstack
func memCheck() {
	if !memDebug {
		return
	}
	for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
		if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
			print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
			throw("mem: infinite loop")
		}
		if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
			print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
			throw("mem: unordered list")
		}
		if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
			print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
			throw("mem: overlapping blocks")
		}
		for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
			if *(*byte)(b) != 0 {
				print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
				throw("mem: uninitialised memory")
			}
		}
	}
}

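// memRound rounds p up to a multiple of the physical page size.
// Allocation and free sizes are both rounded this way, so blocks on
// the free list always cover whole pages. For example, on a system
// with 4096-byte pages (illustrative; physPageSize varies by
// platform), memRound(1) and memRound(4096) both return 4096.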
func memRound(p uintptr) uintptr {
	return alignUp(p, physPageSize)
}

// initBloc sets the initial break to the first page boundary past the
// end of the loaded binary's data, where the heap begins.
func initBloc() {
	bloc = memRound(firstmoduledata.end)
	blocMax = bloc
}

func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
	var p uintptr
	systemstack(func() {
		lock(&memlock)
		p = uintptr(memAlloc(n))
		memCheck()
		unlock(&memlock)
	})
	return unsafe.Pointer(p)
}

func sysFreeOS(v unsafe.Pointer, n uintptr) {
	systemstack(func() {
		lock(&memlock)
		if uintptr(v)+n == bloc {
			// Address range being freed is at the end of memory,
			// so record a new lower value for end of memory.
			// Can't actually shrink address space because segment is shared.
			memclrNoHeapPointers(v, n)
			bloc -= n
		} else {
			memFree(v, n)
			memCheck()
		}
		unlock(&memlock)
	})
}
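
// For example (illustrative addresses): with bloc at 0x6000, freeing
// [0x5000, 0x6000) abuts the break, so bloc simply drops to 0x5000.
// Freeing [0x3000, 0x4000) instead leaves a hole in the middle of the
// address space, which goes onto the free list via memFree.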

// The remaining sys*OS hooks are no-ops on sbrk systems.

func sysUnusedOS(v unsafe.Pointer, n uintptr) {
}

func sysUsedOS(v unsafe.Pointer, n uintptr) {
}

func sysHugePageOS(v unsafe.Pointer, n uintptr) {
}

func sysNoHugePageOS(v unsafe.Pointer, n uintptr) {
}

func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
}

func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
}

func sysFaultOS(v unsafe.Pointer, n uintptr) {
}

func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
	var p uintptr
	systemstack(func() {
		lock(&memlock)
		if uintptr(v) == bloc {
			// Address hint is the current end of memory,
			// so try to extend the address space.
			p = uintptr(sbrk(n))
		}
		if p == 0 && v == nil {
			p = uintptr(memAlloc(n))
			memCheck()
		}
		unlock(&memlock)
	})
	return unsafe.Pointer(p)
}
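
// Note the hint handling above: a hint equal to bloc is satisfied by
// extending the break with sbrk; a nil hint means "anywhere", so the
// reservation is served by memAlloc; any other hint (or a failed sbrk)
// yields nil, telling the caller the reservation cannot be honored.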

// sysReserveAlignedSbrk reserves size bytes aligned to align, either
// from the free list or by extending the break. It returns the aligned
// base pointer and the size actually reserved (nil and 0 on failure).
func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
	var p uintptr
	systemstack(func() {
		lock(&memlock)
		if base := memAllocNoGrow(size + align); base != nil {
			// We can satisfy the reservation from the free list.
			// Trim off the unaligned parts.
			start := alignUp(uintptr(base), align)
			if startLen := start - uintptr(base); startLen > 0 {
				memFree(base, startLen)
			}
			end := start + size
			if endLen := (uintptr(base) + size + align) - end; endLen > 0 {
				memFree(unsafe.Pointer(end), endLen)
			}
			memCheck()
			unlock(&memlock)
			p = start
			return
		}

		// Round up bloc to align, then allocate size.
		p = alignUp(bloc, align)
		r := sbrk(p + size - bloc)
		if r == nil {
			p, size = 0, 0
		} else if l := p - uintptr(r); l > 0 {
			// Free the area we skipped over for alignment.
			memFree(r, l)
			memCheck()
		}
		unlock(&memlock)
	})
	return unsafe.Pointer(p), size
}
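
// A worked example of the free-list path (illustrative, 0x1000-byte
// pages): for size = 0x2000 and align = 0x4000, memAllocNoGrow grabs
// 0x6000 bytes, say at base = 0x5000. start rounds up to 0x8000;
// [0x5000, 0x8000) and [0xa000, 0xb000) go back onto the free list,
// and [0x8000, 0xa000) is returned as the aligned reservation.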