#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
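
/*
 * Note: READ_ONCE()/WRITE_ONCE() only keep the compiler from tearing or
 * caching the access; since m68k has no SMP systems and a naturally aligned
 * 32-bit load/store cannot be interrupted mid-instruction, a plain access
 * is assumed to be sufficient here.
 */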

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}									\

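/*
 * For reference, ATOMIC_OP(add, +=, add) expands to roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * i.e. a single read-modify-write instruction on the counter in memory;
 * the c_op argument is only used by the IRQ-disabling fallback below.
 */
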
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}
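
/*
 * The loop above is the usual compare-and-swap retry pattern: the expected
 * old value (%2, seeded from atomic_read()) is copied into %1, the operation
 * is applied to %1, and "casl" writes %1 back to the counter only if it still
 * equals %2.  On failure casl reloads %2 with the current counter value and
 * "jne 1b" retries, so a concurrent update (e.g. from an interrupt handler)
 * is never lost.
 */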

#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}
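
/*
 * When CONFIG_RMW_INSNS is not set (CPUs or boards where the cas instruction
 * cannot be used), atomicity against interrupts is obtained by disabling
 * local interrupts around a plain C read-modify-write; with no SMP m68k
 * systems that is all that is required.
 */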

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, eor)
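
/*
 * The instantiations above provide atomic_add()/atomic_sub() together with
 * atomic_add_return()/atomic_sub_return(); the bitwise operations only get
 * the non-returning forms ("eor" is the m68k mnemonic for exclusive or).
 */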

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

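/*
 * The *_and_test() / *_negative() helpers below rely on the m68k "Scc"
 * instructions (seq, slt, smi): after the arithmetic instruction they set
 * the destination byte to 0xff if the tested condition code is true and to
 * 0x00 otherwise, so "c != 0" reports the condition.
 */
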
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
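
/*
 * With no CAS available, cmpxchg()/xchg() on an atomic_t are emulated by
 * briefly disabling local interrupts around an ordinary read/compare/write;
 * on a uniprocessor that is sufficient to make the sequence atomic.
 */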

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
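
/*
 * __atomic_add_unless() adds @a to @v unless the counter is already @u and
 * returns the value that was observed before the (possible) addition.  A
 * sketch of a hypothetical caller, taking a reference only while the count
 * is still non-zero:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;		// object already released
 */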

#endif /* __ARCH_M68K_ATOMIC__ */