/* atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999, 2007, 2012 David S. Miller (davem@davemloft.net)
 */

#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>

        .text

        /* Two versions of the atomic routines, one that
         * does not return a value and does not perform
         * memory barriers, and a second which returns
         * a value and does the barriers.
         */

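        /* Every routine below follows the same lock-free pattern:
         * load the old value, compute the new value, and publish it
         * with a compare-and-swap (cas/casx); if another CPU raced
         * us, the cas fails and we retry.  BACKOFF_SETUP() and
         * BACKOFF_SPIN() (from asm/backoff.h) wrap the retry in an
         * exponential backoff delay so that contending CPUs do not
         * hammer the cache line.
         */
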
#define ATOMIC_OP(op)                                                  \
ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */             \
        BACKOFF_SETUP(%o2);                                            \
1:      lduw    [%o1], %g1;                                            \
        op      %g1, %o0, %g7;                                         \
        cas     [%o1], %g1, %g7;                                       \
        cmp     %g1, %g7;                                              \
        bne,pn  %icc, BACKOFF_LABEL(2f, 1b);                           \
         nop;                                                          \
        retl;                                                          \
         nop;                                                          \
2:      BACKOFF_SPIN(%o2, %o3, 1b);                                    \
ENDPROC(atomic_##op);                                                  \

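        /* Illustrative expansion: ATOMIC_OP(add) emits atomic_add().
         * %g1 holds the value loaded from memory and %g7 the proposed
         * new value; cas stores %g7 only if [%o1] still equals %g1,
         * and always returns the value it found in %g7.  So %g1 != %g7
         * afterwards means we lost a race and must branch back to 1:.
         */
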
#define ATOMIC_OP_RETURN(op)                                           \
ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */    \
        BACKOFF_SETUP(%o2);                                            \
1:      lduw    [%o1], %g1;                                            \
        op      %g1, %o0, %g7;                                         \
        cas     [%o1], %g1, %g7;                                       \
        cmp     %g1, %g7;                                              \
        bne,pn  %icc, BACKOFF_LABEL(2f, 1b);                           \
         op     %g1, %o0, %g1;                                         \
        retl;                                                          \
         sra    %g1, 0, %o0;                                           \
2:      BACKOFF_SPIN(%o2, %o3, 1b);                                    \
ENDPROC(atomic_##op##_return);

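        /* The _return variant recomputes the result into %g1 in the
         * branch delay slot; that instruction executes on the retry
         * path too, but %g1 is reloaded at 1: so the clobber is
         * harmless.  On the fall-through path, "sra %g1, 0, %o0"
         * sign-extends the low 32 bits into the 64-bit return
         * register, since the C caller expects a plain int.
         */
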
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

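/* The two instantiations above generate atomic_add, atomic_sub,
 * atomic_add_return and atomic_sub_return.  A rough C-level sketch
 * of what the _return flavor implements (illustrative only;
 * atomic_cmpxchg() stands in for the cas instruction):
 *
 *      int atomic_add_return(int i, atomic_t *v)
 *      {
 *              int old, new;
 *
 *              do {
 *                      old = v->counter;
 *                      new = old + i;
 *              } while (atomic_cmpxchg(v, old, new) != old);
 *              return new;
 *      }
 */
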
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op)                                                \
ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */           \
        BACKOFF_SETUP(%o2);                                            \
1:      ldx     [%o1], %g1;                                            \
        op      %g1, %o0, %g7;                                         \
        casx    [%o1], %g1, %g7;                                       \
        cmp     %g1, %g7;                                              \
        bne,pn  %xcc, BACKOFF_LABEL(2f, 1b);                           \
         nop;                                                          \
        retl;                                                          \
         nop;                                                          \
2:      BACKOFF_SPIN(%o2, %o3, 1b);                                    \
ENDPROC(atomic64_##op);                                                \

#define ATOMIC64_OP_RETURN(op)                                         \
ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */  \
        BACKOFF_SETUP(%o2);                                            \
1:      ldx     [%o1], %g1;                                            \
        op      %g1, %o0, %g7;                                         \
        casx    [%o1], %g1, %g7;                                       \
        cmp     %g1, %g7;                                              \
        bne,pn  %xcc, BACKOFF_LABEL(2f, 1b);                           \
         nop;                                                          \
        retl;                                                          \
         op     %g1, %o0, %o0;                                         \
2:      BACKOFF_SPIN(%o2, %o3, 1b);                                    \
ENDPROC(atomic64_##op##_return);

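/* The 64-bit flavors mirror the 32-bit ones, substituting ldx/casx
 * for lduw/cas and testing %xcc instead of %icc.  No sign extension
 * is needed on return, so the result is computed straight into %o0
 * in the retl delay slot.
 */
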
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)

ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
        BACKOFF_SETUP(%o2)
1:      ldx     [%o0], %g1
        brlez,pn %g1, 3f
         sub    %g1, 1, %g7
        casx    [%o0], %g1, %g7
        cmp     %g1, %g7
        bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
3:      retl
         sub    %g1, 1, %o0
2:      BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_dec_if_positive)
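
/* Roughly equivalent C (illustrative only; atomic64_cmpxchg() stands
 * in for casx): decrement the counter only if it is currently
 * positive, and return old - 1 either way, so a result <= 0 tells
 * the caller no store was performed:
 *
 *      long atomic64_dec_if_positive(atomic64_t *v)
 *      {
 *              long old, new;
 *
 *              do {
 *                      old = v->counter;
 *                      new = old - 1;
 *                      if (old <= 0)
 *                              break;
 *              } while (atomic64_cmpxchg(v, old, new) != old);
 *              return new;
 *      }
 */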