1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 //go:build mips64 || mips64le
6
7 #include "textflag.h"
8
9 #define SYNC WORD $0xf
10
11 // bool cas(uint32 *ptr, uint32 old, uint32 new)
12 // Atomically:
13 // if(*val == old){
14 // *val = new;
15 // return 1;
16 // } else
17 // return 0;
18 TEXT ·Cas(SB), NOSPLIT, $0-17
19 MOVV ptr+0(FP), R1
20 MOVW old+8(FP), R2
21 MOVW new+12(FP), R5
22 SYNC
23 cas_again:
24 MOVV R5, R3
25 LL (R1), R4
26 BNE R2, R4, cas_fail
27 SC R3, (R1)
28 BEQ R3, cas_again
29 MOVV $1, R1
30 MOVB R1, ret+16(FP)
31 SYNC
32 RET
33 cas_fail:
34 MOVV $0, R1
35 JMP -4(PC)
36
37 // bool cas64(uint64 *ptr, uint64 old, uint64 new)
38 // Atomically:
39 // if(*val == old){
40 // *val = new;
41 // return 1;
42 // } else {
43 // return 0;
44 // }
45 TEXT ·Cas64(SB), NOSPLIT, $0-25
46 MOVV ptr+0(FP), R1
47 MOVV old+8(FP), R2
48 MOVV new+16(FP), R5
49 SYNC
50 cas64_again:
51 MOVV R5, R3
52 LLV (R1), R4
53 BNE R2, R4, cas64_fail
54 SCV R3, (R1)
55 BEQ R3, cas64_again
56 MOVV $1, R1
57 MOVB R1, ret+24(FP)
58 SYNC
59 RET
60 cas64_fail:
61 MOVV $0, R1
62 JMP -4(PC)
63
64 TEXT ·Casint32(SB), NOSPLIT, $0-17
65 JMP ·Cas(SB)
66
67 TEXT ·Casint64(SB), NOSPLIT, $0-25
68 JMP ·Cas64(SB)
69
70 TEXT ·Casuintptr(SB), NOSPLIT, $0-25
71 JMP ·Cas64(SB)
72
73 TEXT ·CasRel(SB), NOSPLIT, $0-17
74 JMP ·Cas(SB)
75
76 TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
77 JMP ·Load64(SB)
78
79 TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
80 JMP ·Load64(SB)
81
82 TEXT ·Storeint32(SB), NOSPLIT, $0-12
83 JMP ·Store(SB)
84
85 TEXT ·Storeint64(SB), NOSPLIT, $0-16
86 JMP ·Store64(SB)
87
88 TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
89 JMP ·Store64(SB)
90
91 TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
92 JMP ·Xadd64(SB)
93
94 TEXT ·Loadint32(SB), NOSPLIT, $0-12
95 JMP ·Load(SB)
96
97 TEXT ·Loadint64(SB), NOSPLIT, $0-16
98 JMP ·Load64(SB)
99
100 TEXT ·Xaddint32(SB), NOSPLIT, $0-20
101 JMP ·Xadd(SB)
102
103 TEXT ·Xaddint64(SB), NOSPLIT, $0-24
104 JMP ·Xadd64(SB)
105
106 // bool casp(void **val, void *old, void *new)
107 // Atomically:
108 // if(*val == old){
109 // *val = new;
110 // return 1;
111 // } else
112 // return 0;
113 TEXT ·Casp1(SB), NOSPLIT, $0-25
114 JMP ·Cas64(SB)
115
116 // uint32 xadd(uint32 volatile *ptr, int32 delta)
117 // Atomically:
118 // *val += delta;
119 // return *val;
120 TEXT ·Xadd(SB), NOSPLIT, $0-20
121 MOVV ptr+0(FP), R2
122 MOVW delta+8(FP), R3
123 SYNC
124 LL (R2), R1
125 ADDU R1, R3, R4
126 MOVV R4, R1
127 SC R4, (R2)
128 BEQ R4, -4(PC)
129 MOVW R1, ret+16(FP)
130 SYNC
131 RET
132
133 // uint64 Xadd64(uint64 volatile *ptr, int64 delta)
134 // Atomically:
135 // *val += delta;
136 // return *val;
137 TEXT ·Xadd64(SB), NOSPLIT, $0-24
138 MOVV ptr+0(FP), R2
139 MOVV delta+8(FP), R3
140 SYNC
141 LLV (R2), R1
142 ADDVU R1, R3, R4
143 MOVV R4, R1
144 SCV R4, (R2)
145 BEQ R4, -4(PC)
146 MOVV R1, ret+16(FP)
147 SYNC
148 RET
149
150 // uint32 Xchg(ptr *uint32, new uint32)
151 // Atomically:
152 // old := *ptr;
153 // *ptr = new;
154 // return old;
155 TEXT ·Xchg(SB), NOSPLIT, $0-20
156 MOVV ptr+0(FP), R2
157 MOVW new+8(FP), R5
158
159 SYNC
160 MOVV R5, R3
161 LL (R2), R1
162 SC R3, (R2)
163 BEQ R3, -3(PC)
164 MOVW R1, ret+16(FP)
165 SYNC
166 RET
167
168 // uint64 Xchg64(ptr *uint64, new uint64)
169 // Atomically:
170 // old := *ptr;
171 // *ptr = new;
172 // return old;
173 TEXT ·Xchg64(SB), NOSPLIT, $0-24
174 MOVV ptr+0(FP), R2
175 MOVV new+8(FP), R5
176
177 SYNC
178 MOVV R5, R3
179 LLV (R2), R1
180 SCV R3, (R2)
181 BEQ R3, -3(PC)
182 MOVV R1, ret+16(FP)
183 SYNC
184 RET
185
186 TEXT ·Xchgint32(SB), NOSPLIT, $0-20
187 JMP ·Xchg(SB)
188
189 TEXT ·Xchgint64(SB), NOSPLIT, $0-24
190 JMP ·Xchg64(SB)
191
192 TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
193 JMP ·Xchg64(SB)
194
195 TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
196 JMP ·Store64(SB)
197
198 TEXT ·StoreRel(SB), NOSPLIT, $0-12
199 JMP ·Store(SB)
200
201 TEXT ·StoreRel64(SB), NOSPLIT, $0-16
202 JMP ·Store64(SB)
203
204 TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
205 JMP ·Store64(SB)
206
207 TEXT ·Store(SB), NOSPLIT, $0-12
208 MOVV ptr+0(FP), R1
209 MOVW val+8(FP), R2
210 SYNC
211 MOVW R2, 0(R1)
212 SYNC
213 RET
214
215 TEXT ·Store8(SB), NOSPLIT, $0-9
216 MOVV ptr+0(FP), R1
217 MOVB val+8(FP), R2
218 SYNC
219 MOVB R2, 0(R1)
220 SYNC
221 RET
222
223 TEXT ·Store64(SB), NOSPLIT, $0-16
224 MOVV ptr+0(FP), R1
225 MOVV val+8(FP), R2
226 SYNC
227 MOVV R2, 0(R1)
228 SYNC
229 RET
230
231 // void Or8(byte volatile*, byte);
232 TEXT ·Or8(SB), NOSPLIT, $0-9
233 MOVV ptr+0(FP), R1
234 MOVBU val+8(FP), R2
235 // Align ptr down to 4 bytes so we can use 32-bit load/store.
236 MOVV $~3, R3
237 AND R1, R3
238 // Compute val shift.
239 #ifdef GOARCH_mips64
240 // Big endian. ptr = ptr ^ 3
241 XOR $3, R1
242 #endif
243 // R4 = ((ptr & 3) * 8)
244 AND $3, R1, R4
245 SLLV $3, R4
246 // Shift val for aligned ptr. R2 = val << R4
247 SLLV R4, R2
248
249 SYNC
250 LL (R3), R4
251 OR R2, R4
252 SC R4, (R3)
253 BEQ R4, -4(PC)
254 SYNC
255 RET
256
257 // void And8(byte volatile*, byte);
258 TEXT ·And8(SB), NOSPLIT, $0-9
259 MOVV ptr+0(FP), R1
260 MOVBU val+8(FP), R2
261 // Align ptr down to 4 bytes so we can use 32-bit load/store.
262 MOVV $~3, R3
263 AND R1, R3
264 // Compute val shift.
265 #ifdef GOARCH_mips64
266 // Big endian. ptr = ptr ^ 3
267 XOR $3, R1
268 #endif
269 // R4 = ((ptr & 3) * 8)
270 AND $3, R1, R4
271 SLLV $3, R4
272 // Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
273 MOVV $0xFF, R5
274 SLLV R4, R2
275 SLLV R4, R5
276 NOR R0, R5
277 OR R5, R2
278
279 SYNC
280 LL (R3), R4
281 AND R2, R4
282 SC R4, (R3)
283 BEQ R4, -4(PC)
284 SYNC
285 RET
286
287 // func Or(addr *uint32, v uint32)
288 TEXT ·Or(SB), NOSPLIT, $0-12
289 MOVV ptr+0(FP), R1
290 MOVW val+8(FP), R2
291
292 SYNC
293 LL (R1), R3
294 OR R2, R3
295 SC R3, (R1)
296 BEQ R3, -4(PC)
297 SYNC
298 RET
299
300 // func And(addr *uint32, v uint32)
301 TEXT ·And(SB), NOSPLIT, $0-12
302 MOVV ptr+0(FP), R1
303 MOVW val+8(FP), R2
304
305 SYNC
306 LL (R1), R3
307 AND R2, R3
308 SC R3, (R1)
309 BEQ R3, -4(PC)
310 SYNC
311 RET
312
313 // func Or32(addr *uint32, v uint32) old uint32
314 TEXT ·Or32(SB), NOSPLIT, $0-20
315 MOVV ptr+0(FP), R1
316 MOVW val+8(FP), R2
317
318 SYNC
319 LL (R1), R3
320 OR R2, R3, R4
321 SC R4, (R1)
322 BEQ R4, -3(PC)
323 SYNC
324 MOVW R3, ret+16(FP)
325 RET
326
327 // func And32(addr *uint32, v uint32) old uint32
328 TEXT ·And32(SB), NOSPLIT, $0-20
329 MOVV ptr+0(FP), R1
330 MOVW val+8(FP), R2
331
332 SYNC
333 LL (R1), R3
334 AND R2, R3, R4
335 SC R4, (R1)
336 BEQ R4, -3(PC)
337 SYNC
338 MOVW R3, ret+16(FP)
339 RET
340
341 // func Or64(addr *uint64, v uint64) old uint64
342 TEXT ·Or64(SB), NOSPLIT, $0-24
343 MOVV ptr+0(FP), R1
344 MOVV val+8(FP), R2
345
346 SYNC
347 LLV (R1), R3
348 OR R2, R3, R4
349 SCV R4, (R1)
350 BEQ R4, -3(PC)
351 SYNC
352 MOVV R3, ret+16(FP)
353 RET
354
355 // func And64(addr *uint64, v uint64) old uint64
356 TEXT ·And64(SB), NOSPLIT, $0-24
357 MOVV ptr+0(FP), R1
358 MOVV val+8(FP), R2
359
360 SYNC
361 LLV (R1), R3
362 AND R2, R3, R4
363 SCV R4, (R1)
364 BEQ R4, -3(PC)
365 SYNC
366 MOVV R3, ret+16(FP)
367 RET
368
369 // func Anduintptr(addr *uintptr, v uintptr) old uintptr
370 TEXT ·Anduintptr(SB), NOSPLIT, $0-24
371 JMP ·And64(SB)
372
373 // func Oruintptr(addr *uintptr, v uintptr) old uintptr
374 TEXT ·Oruintptr(SB), NOSPLIT, $0-24
375 JMP ·Or64(SB)
376
377 // uint32 ·Load(uint32 volatile* ptr)
378 TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
379 MOVV ptr+0(FP), R1
380 SYNC
381 MOVWU 0(R1), R1
382 SYNC
383 MOVW R1, ret+8(FP)
384 RET
385
386 // uint8 ·Load8(uint8 volatile* ptr)
387 TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
388 MOVV ptr+0(FP), R1
389 SYNC
390 MOVBU 0(R1), R1
391 SYNC
392 MOVB R1, ret+8(FP)
393 RET
394
395 // uint64 ·Load64(uint64 volatile* ptr)
396 TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
397 MOVV ptr+0(FP), R1
398 SYNC
399 MOVV 0(R1), R1
400 SYNC
401 MOVV R1, ret+8(FP)
402 RET
403
404 // void *·Loadp(void *volatile *ptr)
405 TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16
406 MOVV ptr+0(FP), R1
407 SYNC
408 MOVV 0(R1), R1
409 SYNC
410 MOVV R1, ret+8(FP)
411 RET
412
413 // uint32 ·LoadAcq(uint32 volatile* ptr)
414 TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
415 JMP atomic·Load(SB)
416
417 // uint64 ·LoadAcq64(uint64 volatile* ptr)
418 TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
419 JMP atomic·Load64(SB)
420
421 // uintptr ·LoadAcquintptr(uintptr volatile* ptr)
422 TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
423 JMP atomic·Load64(SB)
424
View as plain text