/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF					\
	unsigned int delay = 1, tmp;					\

#define SCOND_FAIL_RETRY_ASM						\
	"	bz	4f			\n"			\
	"	; --- scond fail delay ---	\n"			\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2:	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
	"	b	1b			\n"	/* start over */	\
	"4:	; --- success ---		\n"			\

#define SCOND_FAIL_RETRY_VARS						\
	,[delay] "+&r" (delay), [tmp] "=&r" (tmp)			\

#else	/* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM						\
	"	bnz	1b			\n"			\

#define SCOND_FAIL_RETRY_VARS

#endif

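/*
 * Illustrative sketch (not part of the upstream header): with
 * CONFIG_ARC_STAR_9000923308, each LL/SC loop built from the fragments
 * above (see ATOMIC_OP() below) behaves roughly like the C below.
 * llock()/scond() and busy_wait() are hypothetical stand-ins for the
 * hardware instructions; scond really reports success via the Z flag
 * (hence "bz 4f" on success, "bnz 1b" in the non-erratum case).
 *
 *	unsigned int delay = 1;
 *
 *	for (;;) {
 *		val = llock(&v->counter);	// 1:
 *		val = val op i;
 *		if (scond(val, &v->counter))	// bz 4f on success
 *			break;
 *		busy_wait(delay);		// 2: brne.d / sub spin
 *		delay <<= 1;			// rol: delay *= 2
 *	}
 */
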
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	__asm__ __volatile__(						\
	"1:	llock	%[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond	%[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val] "=&r" (val) /* Early clobber to prevent reg reuse */	\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
	  [i] "ir" (i)							\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock	%[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond	%[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val] "=&r" (val)						\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr] "r" (&v->counter),					\
	  [i] "ir" (i)							\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

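/*
 * For illustration: in the !CONFIG_ARC_STAR_9000923308 case,
 * ATOMIC_OP_RETURN(add, +=, add) above expands to roughly the following
 * (a sketch, with the retry fragments substituted in):
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		smp_mb();
 *		__asm__ __volatile__(
 *		"1:	llock	%[val], [%[ctr]]	\n"
 *		"	add	%[val], %[val], %[i]	\n"
 *		"	scond	%[val], [%[ctr]]	\n"
 *		"	bnz	1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *		smp_mb();
 *
 *		return val;
 *	}
 */
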
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}
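
/*
 * Worked example of the race the lock closes: with !LLSC, atomic_add()
 * is emulated as lock; read; modify; write; unlock. Without the lock in
 * atomic_set(), a bare ST could land mid-sequence and be lost:
 *
 *	CPU0 (emulated atomic_add)	CPU1 (unlocked atomic_set)
 *	--------------------------	--------------------------
 *	atomic_ops_lock()
 *	tmp = v->counter
 *					v->counter = i	// single ST
 *	v->counter = tmp + 1		// CPU1's store silently lost
 *	atomic_ops_unlock()
 */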

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

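/*
 * atomic_ops_lock()/atomic_ops_unlock() come from <asm/smp.h>. As a
 * rough sketch (assuming the usual ARC definitions of that header),
 * they reduce to:
 *
 *	// UP: the critical section only needs irqs off
 *	#define atomic_ops_lock(flags)		local_irq_save(flags)
 *	#define atomic_ops_unlock(flags)	local_irq_restore(flags)
 *
 *	// SMP: additionally serialize across CPUs with a global
 *	// arch spinlock (irqs off + arch_spin_lock())
 */
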
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif	/* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

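/*
 * Usage sketch (illustrative): the instantiations above generate
 * atomic_add(), atomic_add_return(), atomic_sub(), atomic_sub_return()
 * and atomic_and(); atomic_clear_mask() is then just an atomic AND with
 * the complement of the mask:
 *
 *	atomic_t flags = ATOMIC_INIT(0x3);	// ATOMIC_INIT defined below
 *
 *	atomic_clear_mask(0x1, &flags);		// counter: 0x3 -> 0x2
 *	atomic_add(4, &flags);			// counter: 0x2 -> 0x6
 */
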
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
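
/*
 * Usage sketch (illustrative, with a hypothetical refcounted object):
 * atomic_inc_not_zero() is the usual building block for "take a
 * reference only if the object is still live":
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// already on its way to being freed
 *	return obj;
 */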

#define atomic_inc(v)	atomic_add(1, v)
#define atomic_dec(v)	atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)	{ (i) }

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */