// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "textflag.h"

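// This file implements the atomic operations used by the Go runtime on arm64.
// Plain loads and stores use the acquire/release instructions LDAR and STLR.
// The read-modify-write operations come in two forms: a single ARMv8.1 LSE
// instruction (SWPAL*, CASAL*, LDADDAL*, LDCLRAL*, LDORAL*) and a fallback
// loop built from the exclusive pair LDAXR/STLXR. When the GOARM64 target
// guarantees LSE, GOARM64_LSE is defined and only the LSE path is assembled;
// otherwise both paths are assembled and the choice is made at run time from
// internal/cpu.ARM64.HasATOMICS.

// The typed variants below are thin wrappers that tail-call the sized
// implementations defined later in this file.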
TEXT ·Casint32(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-12
	B	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	B	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

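// Casp1 performs a compare-and-swap on a pointer-sized word; it does not
// perform a write barrier itself.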
TEXT ·Casp1(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

// uint32 ·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0
	MOVW	R0, ret+8(FP)
	RET

// uint8 ·Load8(uint8 volatile* addr)
TEXT ·Load8(SB),NOSPLIT,$0-9
	MOVD	ptr+0(FP), R0
	LDARB	(R0), R0
	MOVB	R0, ret+8(FP)
	RET

// uint64 ·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// void *·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// uint32 ·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* addr)
TEXT ·LoadAcq64(SB),NOSPLIT,$0-16
	B	·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* addr)
TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16
	B	·Load64(SB)

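// void ·StorepNoWB(void *volatile *ptr, void *val)
// Release store of val to *ptr; it does not perform a write barrier.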
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	B	·Store64(SB)

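// void ·StoreRel(uint32 volatile* ptr, uint32 val)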
TEXT ·StoreRel(SB), NOSPLIT, $0-12
	B	·Store(SB)

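// void ·StoreRel64(uint64 volatile* ptr, uint64 val)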
TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

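// void ·StoreReluintptr(uintptr volatile* ptr, uintptr val)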
TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

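// void ·Store(uint32 volatile* ptr, uint32 val)
// Uses a store-release (STLR), so prior memory accesses are ordered before it.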
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

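// void ·Store8(uint8 volatile* ptr, uint8 val)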
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	STLRB	R1, (R0)
	RET

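// void ·Store64(uint64 volatile* ptr, uint64 val)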
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

// uint8 Xchg8(ptr *uint8, new uint8)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVB	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALB	R1, (R0), R2
	MOVB	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	STLXRB	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVB	R2, ret+16(FP)
	RET
#endif

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALW	R1, (R0), R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	STLXRW	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// bool Cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVW	old+8(FP), R1
	MOVW	new+12(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
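	// CASAL compares *ptr with R3 and stores R2 on a match; R3 is always
	// updated with the value that was in memory, so comparing it with the
	// old value determines whether the swap succeeded.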
	MOVD	R1, R3
	CASALW	R3, (R0), R2
	CMP	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R3
	CMPW	R1, R3
	BNE	ok
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#endif

// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MOVD	R1, R3
	CASALD	R3, (R0), R2
	CMP	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R3
	CMP	R1, R3
	BNE	ok
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#endif

// uint32 Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
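	// LDADDAL leaves the value that was in memory before the add in R2,
	// so add delta again to produce the new value for the return.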
	LDADDALW	R1, (R0), R2
	ADD	R1, R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ADDW	R2, R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDADDALD	R1, (R0), R2
	ADD	R1, R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ADD	R2, R1, R2
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

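// The typed exchange variants below tail-call the sized Xchg implementations above.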
TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	B	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

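// void ·And8(uint8 volatile* ptr, uint8 val)
// Atomically:
//	*ptr &= val;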
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
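	// There is no LSE atomic AND; instead, atomically clear the complement
	// of val (LDCLR clears the bits that are set in its source register).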
	MVN	R1, R2
	LDCLRALB	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	AND	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

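// void ·Or8(uint8 volatile* ptr, uint8 val)
// Atomically:
//	*ptr |= val;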
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALB	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	ORR	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALW	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func Or32(ptr *uint32, val uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func And32(ptr *uint32, val uint32) old uint32
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALW	R2, (R0), R3
	MOVD	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func Or64(ptr *uint64, val uint64) old uint64
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ORR	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func And64(ptr *uint64, val uint64) old uint64
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALD	R2, (R0), R3
	MOVD	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	AND	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func Anduintptr(ptr *uintptr, val uintptr) old uintptr
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	B	·And64(SB)

// func Oruintptr(ptr *uintptr, val uintptr) old uintptr
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	B	·Or64(SB)
