// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "textflag.h"

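// The functions below are thin, type-specialized entry points that tail-jump
// (B) to the width-appropriate implementation further down in this file. On
// arm64, int, uint, uintptr, and unsafe.Pointer are all 64 bits wide, so the
// int64/uintptr/pointer variants alias the 64-bit primitives.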
TEXT ·Casint32(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-12
	B	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	B	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

TEXT ·Casp1(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

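// The loads below use the load-acquire instructions (LDARB/LDARW/LDAR), which
// order the load before all subsequent memory accesses on this core. In Go
// terms, each is roughly:
//
//	return *ptr  // with acquire ordering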
// uint32 ·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0
	MOVW	R0, ret+8(FP)
	RET

// uint8 ·Load8(uint8 volatile* addr)
TEXT ·Load8(SB),NOSPLIT,$0-9
	MOVD	ptr+0(FP), R0
	LDARB	(R0), R0
	MOVB	R0, ret+8(FP)
	RET

// uint64 ·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// void *·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// uint32 ·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* addr)
TEXT ·LoadAcq64(SB),NOSPLIT,$0-16
	B	·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* addr)
TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16
	B	·Load64(SB)

TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

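// The stores below use the store-release instructions (STLRB/STLRW/STLR),
// which order the store after all preceding memory accesses on this core. In
// Go terms, each is roughly:
//
//	*ptr = val  // with release ordering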
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	STLRB	R1, (R0)
	RET

TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

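// The read-modify-write operations below (Xchg*, Cas*, Xadd*, And*, Or*) come
// in two flavors. When built with GOARM64_LSE, only the ARMv8.1 LSE atomic
// instructions (SWP, CAS, LDADD, LDCLR, LDOR) are emitted. Otherwise the LSE
// path is guarded by a runtime check of internal/cpu's ARM64.HasATOMICS flag,
// with a fallback built from exclusive load-acquire/store-release pairs
// (LDAXR/STLXR): the loop retries until the store-exclusive reports success,
// i.e. until no other agent touched the location between the load and the
// store.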
// uint8 Xchg8(ptr *uint8, new uint8)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVB	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALB	R1, (R0), R2
	MOVB	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	STLXRB	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVB	R2, ret+16(FP)
	RET
#endif

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALW	R1, (R0), R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	STLXRW	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// bool Cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
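//
// On the LSE path, CASALW expects the value to compare against in R3 and
// overwrites R3 with the value it actually observed at (R0); comparing R3
// back against the original old value (kept in R1) and materializing the
// flags with CSET EQ produces the boolean result. On the LL/SC path the
// store-exclusive is skipped when the comparison fails, and the final
// CSET EQ reads the flags left by CMPW, with CBNZ retrying until the
// store-exclusive succeeds.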
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVW	old+8(FP), R1
	MOVW	new+12(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MOVD	R1, R3
	CASALW	R3, (R0), R2
	CMP	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R3
	CMPW	R1, R3
	BNE	ok
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#endif

// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MOVD	R1, R3
	CASALD	R3, (R0), R2
	CMP	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R3
	CMP	R1, R3
	BNE	ok
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#endif

// uint32 Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
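//
// On the LSE path, LDADDALW writes the value that was at (R0) before the add
// into R2, so the new value returned to the caller is recomputed as R2 + R1.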
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDADDALW	R1, (R0), R2
	ADD	R1, R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ADDW	R2, R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDADDALD	R1, (R0), R2
	ADD	R1, R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ADD	R2, R1, R2
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	B	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

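// And8 and Or8 (and And/Or below) are pure side-effect operations: the old
// value loaded by the atomic instruction is discarded. LSE has no atomic AND,
// only an atomic bit-clear (LDCLR*), so And is implemented by clearing the
// complement of the mask: MVN computes ^val, and LDCLRAL* then performs
// *ptr = *ptr &^ ^val, which equals *ptr & val.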
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALB	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	AND	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALB	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	ORR	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALW	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

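// The Or32/And32/Or64/And64 variants below are like Or/And but additionally
// return the value that was in memory before the update. Roughly, in Go:
//
//	old := *ptr
//	*ptr |= val  // or &= val
//	return old
//
// In the LL/SC loops the pre-update value stays in R2 and the updated value
// is built in a separate register, so R2 can be returned unchanged.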
// func Or32(addr *uint32, v uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func And32(addr *uint32, v uint32) old uint32
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALW	R2, (R0), R3
	MOVD	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func Or64(addr *uint64, v uint64) old uint64
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ORR	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func And64(addr *uint64, v uint64) old uint64
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALD	R2, (R0), R3
	MOVD	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	AND	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func Anduintptr(addr *uintptr, v uintptr) old uintptr
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	B	·And64(SB)

// func Oruintptr(addr *uintptr, v uintptr) old uintptr
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	B	·Or64(SB)