#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
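
/*
 * Illustrative usage only (not part of this interface; the counter
 * name is made up), e.g. for tracking in-flight requests:
 *
 *	static atomic_t nr_requests = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_requests, 0);
 *	if (atomic_read(&nr_requests) > 0)
 *		wait_for_completion_of_requests();
 *
 * Note that atomic_read()/atomic_set() are single volatile accesses;
 * only the operations further down are atomic read-modify-write.
 */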
#include <linux/compiler.h>
#include <asm/system.h>

/*
 * On SH-4A the read-modify-write is done with a movli.l/movco.l
 * (load-locked/store-conditional) sequence: movco.l clears the T bit
 * if the reservation was lost, and "bf 1b" branches back to retry
 * the whole operation.  All other SH parts fall back to disabling
 * interrupts around a plain read-modify-write.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
#endif
}

static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
#endif
}
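
/*
 * Illustrative only, with a made-up statistics counter: callers use
 * atomic_add()/atomic_sub() when the update must be atomic but the
 * resulting value is not needed:
 *
 *	static atomic_t bytes_pending = ATOMIC_INIT(0);
 *
 *	atomic_add(len, &bytes_pending);	(on submit)
 *	atomic_sub(len, &bytes_pending);	(on completion)
 */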

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the return value is automatically set without having
 * to do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
"	synco					\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	local_irq_restore(flags);
#endif

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
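
/*
 * Illustrative only (the counters and helpers are hypothetical): the
 * _return variants are for callers that need the new value, e.g. to
 * detect crossing a threshold:
 *
 *	if (atomic_add_return(len, &used) > limit)
 *		throttle();
 *
 *	if (atomic_add_negative(-len, &budget))
 *		refill_budget();
 */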

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
"	synco					\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	local_irq_restore(flags);
#endif

	return temp;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
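
/*
 * Illustrative only, the classic reference-count pattern (obj and
 * obj_free() are made up): exactly one caller observes the
 * transition to zero and may free the object:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_free(obj);
 */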

#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
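
/*
 * Illustrative only, a sketch of the usual cmpxchg() retry loop
 * (cnt and CEILING are made up; here clamping a counter): re-read
 * and retry whenever another path updated the value in between:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&cnt);
 *		new = old < CEILING ? old + 1 : old;
 *	} while (atomic_cmpxchg(&cnt, old, new) != old);
 */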

#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
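
/*
 * Illustrative only (lookup path of a hypothetical object cache):
 * atomic_inc_not_zero() takes a new reference only if the object is
 * not already on its way to being freed:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object already dying, don't touch it)
 */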

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
#endif
}
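
/*
 * Illustrative only, with made-up event bits: the mask operations
 * suit flag words updated from both interrupt and process context:
 *
 *	atomic_set_mask(EV_RX | EV_TX, &pending);	(mark events)
 *	atomic_clear_mask(EV_RX, &pending);		(ack one event)
 */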

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */