// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build mips || mipsle

#include "textflag.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Cas(SB),NOSPLIT,$0-13
	MOVW	ptr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R5
	SYNC
try_cas:
	MOVW	R5, R3	// Copy new each iteration; SC overwrites its source register.
	LL	(R1), R4	// R4 = *R1, begin atomic LL/SC sequence
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// *R1 = R3; R3 = 1 if the store succeeded, 0 if it was lost
	BEQ	R3, try_cas	// Retry on failure (BEQ compares against the zero register R0).
	SYNC
	MOVB	R3, ret+12(FP)
	RET
cas_fail:
	SYNC
	MOVB	R0, ret+12(FP)
	RET

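// void Store(uint32 volatile *ptr, uint32 val)
// Atomically, with full barriers on both sides:
//	*ptr = val;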
TEXT ·Store(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET

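// void Store8(byte volatile *ptr, byte val)
// Atomically, with full barriers on both sides:
//	*ptr = val;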
TEXT ·Store8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVB	val+4(FP), R2
	SYNC
	MOVB	R2, 0(R1)
	SYNC
	RET

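// uint32 Load(uint32 volatile *ptr)
// Atomically:
//	return *ptr;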
TEXT ·Load(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	SYNC
	MOVW	0(R1), R1
	SYNC
	MOVW	R1, ret+4(FP)
	RET

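// byte Load8(byte volatile *ptr)
// Atomically:
//	return *ptr;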
TEXT ·Load8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	SYNC
	MOVB	0(R1), R1
	SYNC
	MOVB	R1, ret+4(FP)
	RET

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT ·Xadd(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	delta+4(FP), R3
	SYNC
try_xadd:
	LL	(R2), R1	// R1 = *R2
	ADDU	R1, R3, R4
	MOVW	R4, R1	// Save the new value; SC overwrites R4 with the store status.
	SC	R4, (R2)	// *R2 = R4
	BEQ	R4, try_xadd	// Retry if the store conditional failed.
	SYNC
	MOVW	R1, ret+8(FP)
	RET

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	new+4(FP), R5
	SYNC
try_xchg:
	MOVW	R5, R3	// Copy new each iteration; SC overwrites its source register.
	LL	(R2), R1	// R1 = *R2
	SC	R3, (R2)	// *R2 = R3
	BEQ	R3, try_xchg	// Retry if the store conditional failed.
	SYNC
	MOVW	R1, ret+8(FP)
	RET

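// The wrappers below are pure trampolines: on mips and mipsle, int32,
// uint32, int, uintptr, and pointers are all 4 bytes with identical
// argument layouts, so each function can tail-jump to the primitive
// that does the work. The 64-bit variants forward to the Cas64, Load64,
// Store64, Xadd64, and Xchg64 implementations.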
TEXT ·Casint32(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Casint64(SB),NOSPLIT,$0-21
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadp(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Storeint32(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Storeint64(SB),NOSPLIT,$0-12
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Loadint32(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadint64(SB),NOSPLIT,$0-12
	JMP	·Load64(SB)

TEXT ·Xaddint32(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB),NOSPLIT,$0-20
	JMP	·Xadd64(SB)

TEXT ·Casp1(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Xchgint32(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB),NOSPLIT,$0-20
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreRel(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

// void Or8(byte volatile*, byte);
TEXT ·Or8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian. ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ptr & 3
	SLL	$3, R4	// R4 = (ptr & 3) * 8
	SLL	R4, R2, R2	// Shift val into its byte lane. R2 = val << R4
	SYNC
try_or8:
	LL	(R3), R4	// R4 = *R3
	OR	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_or8	// Retry if the store conditional failed.
	SYNC
	RET
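// Worked example (little endian): for ptr&3 == 1, R4 = 1*8 = 8, so val
// lands in bits 8..15 of the aligned word; the zero bits elsewhere make
// the OR a no-op for the other three bytes. On big endian, the ptr^3
// above flips the byte index so the shift still selects the byte that
// lives at the original address within the word.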

// void And8(byte volatile*, byte);
TEXT ·And8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian. ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ptr & 3
	SLL	$3, R4	// R4 = (ptr & 3) * 8
	MOVW	$0xFF, R5
	SLL	R4, R2	// R2 = val << R4
	SLL	R4, R5	// R5 = 0xFF << R4
	NOR	R0, R5	// R5 = ^(0xFF << R4)
	OR	R5, R2	// R2 = val << R4 | ^(0xFF << R4)
	SYNC
try_and8:
	LL	(R3), R4	// R4 = *R3
	AND	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_and8	// Retry if the store conditional failed.
	SYNC
	RET
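// Unlike Or8, And8 must leave the neighboring bytes alone, so the mask
// ORs in ^(0xFF << R4): every bit outside the target byte is 1, the
// identity for AND. The complement is formed with NOR against the zero
// register, since MIPS has no NOT instruction.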

// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3
	OR	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)
	SYNC
	RET

// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3
	AND	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)
	SYNC
	RET

// func Or32(addr *uint32, v uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-12
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3
	OR	R2, R3, R4
	SC	R4, (R1)
	BEQ	R4, -4(PC)
	SYNC
	MOVW	R3, ret+8(FP)
	RET

// func And32(addr *uint32, v uint32) old uint32
TEXT ·And32(SB), NOSPLIT, $0-12
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3
	AND	R2, R3, R4
	SC	R4, (R1)
	BEQ	R4, -4(PC)
	SYNC
	MOVW	R3, ret+8(FP)
	RET
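// Or32 and And32 return the old value: the three-operand OR/AND write
// their result into R4, leaving the word loaded by LL intact in R3 so
// it can be returned once the SC succeeds.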

// func Anduintptr(addr *uintptr, v uintptr) old uintptr
// uintptr is 32 bits on mips and mipsle, so the 32-bit versions match.
TEXT ·Anduintptr(SB), NOSPLIT, $0-12
	JMP	·And32(SB)

// func Oruintptr(addr *uintptr, v uintptr) old uintptr
TEXT ·Oruintptr(SB), NOSPLIT, $0-12
	JMP	·Or32(SB)

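// void spinLock(uint32 *state)
// Acquire the spin lock: atomically swap 1 into *state, spinning while
// the lock is already held. Together with spinUnlock, this backs the
// 64-bit atomic operations, which are implemented in Go on this 32-bit
// platform.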
TEXT ·spinLock(SB),NOSPLIT,$0-4
	MOVW	state+0(FP), R1
	MOVW	$1, R2
	SYNC
try_lock:
	MOVW	R2, R3	// Copy the lock value; SC overwrites its source register.
check_again:
	LL	(R1), R4	// R4 = *R1
	BNE	R4, check_again	// Spin while the lock is held (test before test-and-set).
	SC	R3, (R1)	// Try to store 1; R3 = 1 on success, 0 on failure.
	BEQ	R3, try_lock	// Retry if the store conditional failed.
	SYNC
	RET

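// void spinUnlock(uint32 *state)
// Release the spin lock by storing 0, with full barriers on both sides.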
TEXT ·spinUnlock(SB),NOSPLIT,$0-4
	MOVW	state+0(FP), R1
	SYNC
	MOVW	R0, (R1)
	SYNC
	RET