/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter),	/* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
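
/*
 * Illustration (not part of the API): under CONFIG_ARC_HAS_LLSC,
 * ATOMIC_OPS(add, +=, add) below expands ATOMIC_OP() into roughly the
 * following.  LLOCK marks the location for exclusive access, SCOND only
 * performs the store if nothing else wrote to it in the meantime, and
 * BNZ retries the whole read-modify-write on failure:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 */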

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
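
/*
 * Illustrative interleaving (not from the kernel tree) of the clobber the
 * above locking prevents: with lock-emulated atomics, atomic_add() is a
 * plain load + add + store of v->counter under atomic_ops_lock().  If
 * atomic_set() stored without taking that lock:
 *
 *	CPU0: atomic_add(1, v)			CPU1: atomic_set(v, 100)
 *	  lock; load counter (== 5)
 *						  store counter = 100
 *	  store counter = 6; unlock
 *
 * CPU1's store vanishes, an outcome no ordering of two truly atomic ops
 * could produce.  Hence even the single-insn atomic_set() takes the lock.
 */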

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0, [%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"	/* operand in r2 ... */			\
	"	mov r3, %1\n"	/* ... counter address in r3 */		\
	"	.word %2\n"	/* emit the atomic CTOP instruction */	\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"	/* CTOP op leaves the old value in r2 */ \
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;		/* old value c_op i == new value */	\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
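
/*
 * Usage sketch (illustrative; the type and helper below are hypothetical,
 * not defined anywhere in the kernel): "add unless" is what makes
 * conditional refcount-style gets race free, e.g. taking a reference only
 * while the count is still non-zero:
 *
 *	static bool try_get_object(struct my_object *obj)
 *	{
 *		return atomic_inc_not_zero(&obj->refcnt);
 *	}
 *
 * atomic_inc_not_zero() evaluates to true only if the counter was observed
 * non-zero and the increment was therefore applied.
 */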

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }
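
/*
 * Typical usage sketch (illustrative only; the names are hypothetical):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void user_attach(void)
 *	{
 *		atomic_inc(&nr_users);
 *	}
 *
 *	void user_detach(void)
 *	{
 *		if (atomic_dec_and_test(&nr_users))	// count just hit zero
 *			last_user_gone();		// hypothetical callback
 *	}
 */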

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */