// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
package runtime

import (
	"unsafe"
)
12
13 const (
14
15 summaryLevels = 5
16
17
18 pageAlloc32Bit = 0
19 pageAlloc64Bit = 1
20
21
22
23
24
25
26 pallocChunksL1Bits = 13
27 )
28
29
30
31
32
33 var levelBits = [summaryLevels]uint{
34 summaryL0Bits,
35 summaryLevelBits,
36 summaryLevelBits,
37 summaryLevelBits,
38 summaryLevelBits,
39 }
40
41
42
43
44
45
46
47
48 var levelShift = [summaryLevels]uint{
49 heapAddrBits - summaryL0Bits,
50 heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
51 heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
52 heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
53 heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
54 }
55
56
57
58
59
60 var levelLogPages = [summaryLevels]uint{
61 logPallocChunkPages + 4*summaryLevelBits,
62 logPallocChunkPages + 3*summaryLevelBits,
63 logPallocChunkPages + 2*summaryLevelBits,
64 logPallocChunkPages + 1*summaryLevelBits,
65 logPallocChunkPages,
66 }
67
68
69
70
71 func (p *pageAlloc) sysInit(test bool) {
72
73
74 for l, shift := range levelShift {
75 entries := 1 << (heapAddrBits - shift)
76
77
78 b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
79 r := sysReserve(nil, b)
80 if r == nil {
81 throw("failed to reserve page summary memory")
82 }
83
84
85 sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
86 p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
87 }
88 }
89
90
91
92
93
94
95
96
97
98
99
100 func (p *pageAlloc) sysGrow(base, limit uintptr) {
101 if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
102 print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
103 throw("sysGrow bounds not aligned to pallocChunkBytes")
104 }
105
106
107
108
109 addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
110 sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
111 return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
112 }
113
114
115
116
117 summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
118 baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
119 limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
120 base := unsafe.Pointer(&p.summary[level][0])
121 return addrRange{
122 offAddr{uintptr(add(base, baseOffset))},
123 offAddr{uintptr(add(base, limitOffset))},
124 }
125 }
126
127
128
129
130 addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
131 sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
132 return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
133 }
134
135
136
137
138
139
140
141
142
143
144 inUseIndex := p.inUse.findSucc(base)
145
146
147 for l := range p.summary {
148
149 needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))
150
151
152
153
154
155 if needIdxLimit > len(p.summary[l]) {
156 p.summary[l] = p.summary[l][:needIdxLimit]
157 }
158
159
160 need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)
161
162
163
164
165
166
167 if inUseIndex > 0 {
168 need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
169 }
170 if inUseIndex < len(p.inUse.ranges) {
171 need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
172 }
173
174 if need.size() == 0 {
175 continue
176 }
177
178
179 sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
180 sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
181 p.summaryMappedReady += need.size()
182 }
183
184
185 p.summaryMappedReady += p.scav.index.sysGrow(base, limit, p.sysStat)
186 }
187
188
189
190
191 func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {
192 if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
193 print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
194 throw("sysGrow bounds not aligned to pallocChunkBytes")
195 }
196 scSize := unsafe.Sizeof(atomicScavChunkData{})
197
198
199
200
201
202
203
204
205
206
207
208 haveMin := s.min.Load()
209 haveMax := s.max.Load()
210 needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
211 needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
212
213
214 if needMax < haveMin {
215 needMax = haveMin
216 }
217 if haveMax != 0 && needMin > haveMax {
218 needMin = haveMax
219 }
220
221
222 chunksBase := uintptr(unsafe.Pointer(&s.chunks[0]))
223 have := makeAddrRange(chunksBase+haveMin*scSize, chunksBase+haveMax*scSize)
224 need := makeAddrRange(chunksBase+needMin*scSize, chunksBase+needMax*scSize)
225
226
227
228 need = need.subtract(have)
229
230
231 if need.size() != 0 {
232 sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
233 sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
234
235 if haveMax == 0 || needMin < haveMin {
236 s.min.Store(needMin)
237 }
238 if needMax > haveMax {
239 s.max.Store(needMax)
240 }
241 }
242 return need.size()
243 }
244
245
246
247
248 func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr {
249 n := uintptr(1<<heapAddrBits) / pallocChunkBytes
250 nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
251 r := sysReserve(nil, nbytes)
252 sl := notInHeapSlice{(*notInHeap)(r), int(n), int(n)}
253 s.chunks = *(*[]atomicScavChunkData)(unsafe.Pointer(&sl))
254 return 0
255 }
256